"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (_a ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase_ : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 50 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
UpperCAmelCase_ : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase_ : Any = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ : Dict = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_ : Any = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
UpperCAmelCase_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
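# --- Illustrative usage sketch (not part of the original file) ---
# Assuming a trained unconditional UNet2DModel checkpoint; the model name and
# call arguments below are examples only, not prescribed by this module:
#
#   from diffusers import UNet2DModel, DDIMScheduler
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images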
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end with `steps` segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
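# Quick sanity check (added for illustration, not in the original script): for
# the straight line f(x) = x on [0, 1] the piecewise-linear approximation is
# exact, so any step count returns sqrt(2):
#
#   >>> round(line_length(lambda x: x, 0, 1, 10), 5)
#   1.41421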
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def __a ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
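# --- Illustrative usage sketch (not part of the original file) ---
# A hedged sketch: `set_partitions` expects the flattenable param PyTree of a
# GPT-Neo-style Flax model; the `model.params` / `unfreeze` names below are
# assumptions about the calling code, not defined in this module:
#
#   from flax.core.frozen_dict import unfreeze
#   param_specs = set_partitions(unfreeze(model.params))
#   # a matched leaf such as ("transformer", "wte", "embedding") maps to P("mp", None);
#   # any leaf left as `_unmatched` would trip the assertion above.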
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
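# --- Illustrative note (not part of the original file) ---
# With this structure, a top-level import such as
#
#   from transformers import LongformerModel
#
# resolves lazily: `_LazyModule` only imports `modeling_longformer` when the
# attribute is first accessed, and the try/except blocks above make the
# tokenizers/torch/TF variants optional dependencies.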
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, looking for the files identified with `identifier`.
        Executes the doctests in those files.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase_ : int = np.zeros((n + 1,) )
UpperCAmelCase_ : Optional[int] = ya
UpperCAmelCase_ : Dict = xa
for k in range(__lowerCamelCase ):
UpperCAmelCase_ : int = y[k] + step_size * ode_func(__lowerCamelCase, y[k] )
UpperCAmelCase_ : Dict = y[k] + (
(step_size / 2) * (ode_func(__lowerCamelCase, y[k] ) + ode_func(x + step_size, __lowerCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
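# --- Illustrative usage example (not part of the original file) ---
# Solving dy/dx = y with y(0) = 1 on [0, 3]; the exact solution is e**x, so
# the final entry should be close to e**3 ≈ 20.0855:
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 3.0)
#   print(y[-1])  # ≈ 20.085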
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
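# --- Illustrative usage example (not part of the original file) ---
# The helpers take aligned 1-D numpy arrays of predictions and labels:
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> approximately {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}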
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
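# --- Illustrative usage sketch (not part of the original file) ---
# A hedged sketch: `post_process_function(examples, features, predictions)` is
# expected to turn raw start/end logits into answer strings before metrics are
# computed (the variable names below are assumptions, as in run_qa.py):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()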
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ):
"""simple docstring"""
super().__init__(
unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for s in text:
char_tokens.extend(lowercase_ )
return char_tokens
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
return (vocab_file,)
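# --- Illustrative usage example (not part of the original file) ---
# The tokenizer is character-level, so every character becomes one token; the
# checkpoint name mirrors PRETRAINED_VOCAB_FILES_MAP above:
#
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("ticket")["input_ids"]   # one id per character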
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ : Tuple = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ : Any = 0.01
with locka.acquire():
with pytest.raises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = time.time()
locka.acquire(_SCREAMING_SNAKE_CASE )
assert time.time() - _start > timeout
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Dict = "a" * 1000 + ".lock"
UpperCAmelCase_ : Optional[int] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(_SCREAMING_SNAKE_CASE )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCAmelCase_ : Any = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_SCREAMING_SNAKE_CASE ):
locka.acquire(0 )
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __a ( __lowerCamelCase ):
return x[0]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase )
UpperCAmelCase_ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase )
UpperCAmelCase_ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase )
UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] )
UpperCAmelCase_ : str = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase )
UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__lowerCamelCase )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase )
UpperCAmelCase_ : int = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
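# --- Illustrative usage example (not part of the original file) ---
# `english_freq_match_score` returns an integer in [0, 12]; higher means the
# message's letter distribution looks more like typical English:
#
#   text = "The quick brown fox jumps over the lazy dog"
#   print(get_frequency_order(text))        # a 26-letter frequency ordering
#   print(english_freq_match_score(text))   # higher = more English-like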
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCAmelCase_ : str = {}
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=1 ):
"""simple docstring"""
if self.graph.get(lowercase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase_ : Optional[int] = [[w, v]]
if not self.graph.get(lowercase_ ):
UpperCAmelCase_ : List[str] = []
def UpperCamelCase__ ( self ):
"""simple docstring"""
return list(self.graph )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
if self.graph.get(lowercase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_ )
def UpperCamelCase__ ( self , lowercase_=-2 , lowercase_=-1 ):
"""simple docstring"""
if s == d:
return []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
if s == -2:
UpperCAmelCase_ : List[str] = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : Tuple = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_ ) != 0:
UpperCAmelCase_ : List[str] = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return visited
def UpperCamelCase__ ( self , lowercase_=-1 ):
"""simple docstring"""
if c == -1:
UpperCAmelCase_ : Optional[int] = floor(random() * 1_0000 ) + 10
for i in range(lowercase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase_ : List[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1 )
def UpperCamelCase__ ( self , lowercase_=-2 ):
"""simple docstring"""
UpperCAmelCase_ : int = deque()
UpperCAmelCase_ : Optional[int] = []
if s == -2:
UpperCAmelCase_ : Tuple = list(self.graph )[0]
d.append(lowercase_ )
visited.append(lowercase_ )
while d:
UpperCAmelCase_ : int = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return len(self.graph[u] )
def UpperCamelCase__ ( self , lowercase_=-2 ):
"""simple docstring"""
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Union[str, Any] = []
if s == -2:
UpperCAmelCase_ : Any = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : List[Any] = s
UpperCAmelCase_ : str = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase_ ) != 0:
UpperCAmelCase_ : Optional[Any] = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : List[str] = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return sorted_nodes
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Dict = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = -2
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Tuple = s
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Union[str, Any] = len(lowercase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : Tuple = True
if len(lowercase_ ) != 0:
UpperCAmelCase_ : Union[str, Any] = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : Union[str, Any] = False
indirect_parents.append(lowercase_ )
UpperCAmelCase_ : Optional[int] = s
UpperCAmelCase_ : int = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return list(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[str] = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : int = -2
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Optional[int] = s
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : List[Any] = True
if len(lowercase_ ) != 0:
UpperCAmelCase_ : int = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : List[str] = False
indirect_parents.append(lowercase_ )
UpperCAmelCase_ : List[Any] = s
UpperCAmelCase_ : Any = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return False
def UpperCamelCase__ ( self , lowercase_=-2 , lowercase_=-1 ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = time()
self.dfs(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = time()
return end - begin
def UpperCamelCase__ ( self , lowercase_=-2 ):
"""simple docstring"""
UpperCAmelCase_ : int = time()
self.bfs(lowercase_ )
UpperCAmelCase_ : str = time()
return end - begin
class A_ :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCAmelCase_ : str = {}
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=1 ):
"""simple docstring"""
# check if the u exists
if self.graph.get(lowercase_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase_ : int = [[w, v]]
# add the other way
if self.graph.get(lowercase_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
UpperCAmelCase_ : List[Any] = [[w, u]]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
if self.graph.get(lowercase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_ )
# the other way round
if self.graph.get(lowercase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase_ )
def UpperCamelCase__ ( self , lowercase_=-2 , lowercase_=-1 ):
"""simple docstring"""
if s == d:
return []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Tuple = []
if s == -2:
UpperCAmelCase_ : Optional[int] = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_ ) != 0:
UpperCAmelCase_ : Dict = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : Tuple = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return visited
def UpperCamelCase__ ( self , lowercase_=-1 ):
"""simple docstring"""
if c == -1:
UpperCAmelCase_ : int = floor(random() * 1_0000 ) + 10
for i in range(lowercase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCAmelCase_ : Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1 )
def UpperCamelCase__ ( self , lowercase_=-2 ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = deque()
UpperCAmelCase_ : Optional[int] = []
if s == -2:
UpperCAmelCase_ : Optional[int] = list(self.graph )[0]
d.append(lowercase_ )
visited.append(lowercase_ )
while d:
UpperCAmelCase_ : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return len(self.graph[u] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Optional[Any] = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : Any = -2
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = s
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Optional[int] = len(lowercase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : Dict = True
if len(lowercase_ ) != 0:
UpperCAmelCase_ : List[str] = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : str = False
indirect_parents.append(lowercase_ )
UpperCAmelCase_ : Tuple = s
UpperCAmelCase_ : int = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return list(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : str = list(self.graph )[0]
stack.append(lowercase_ )
visited.append(lowercase_ )
UpperCAmelCase_ : Tuple = -2
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Any = s
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : List[str] = len(lowercase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : List[Any] = True
if len(lowercase_ ) != 0:
UpperCAmelCase_ : Dict = stack[len(lowercase_ ) - 1]
else:
UpperCAmelCase_ : str = False
indirect_parents.append(lowercase_ )
UpperCAmelCase_ : List[Any] = s
UpperCAmelCase_ : Optional[int] = ss
# check if se have reached the starting point
if len(lowercase_ ) == 0:
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
return list(self.graph )
def UpperCamelCase__ ( self , lowercase_=-2 , lowercase_=-1 ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = time()
self.dfs(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = time()
return end - begin
def UpperCamelCase__ ( self , lowercase_=-2 ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = time()
self.bfs(lowercase_ )
UpperCAmelCase_ : Optional[int] = time()
return end - begin
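# --- Illustrative usage example (not part of the original file) ---
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(2, 0)         # introduces the cycle 0 -> 1 -> 2 -> 0
#   print(g.dfs(0))          # e.g. [0, 1, 2]
#   print(g.has_cycle())     # True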
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_ (_lowerCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = 42
class A_ (_lowerCamelCase ,_lowerCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , lowercase_ = 6_5536 , lowercase_ = None , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 0 , lowercase_ = "fourier" , lowercase_ = True , lowercase_ = False , lowercase_ = 0.0 , lowercase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowercase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowercase_ = "UNetMidBlock1D" , lowercase_ = None , lowercase_ = (32, 32, 64) , lowercase_ = None , lowercase_ = 8 , lowercase_ = 1 , lowercase_ = False , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase_ : Any = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowercase_ , log=lowercase_ , flip_sin_to_cos=lowercase_ )
UpperCAmelCase_ : str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase_ : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowercase_ , downscale_freq_shift=lowercase_ )
UpperCAmelCase_ : Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase_ : Any = block_out_channels[0] * 4
UpperCAmelCase_ : Optional[int] = TimestepEmbedding(
in_channels=lowercase_ , time_embed_dim=lowercase_ , act_fn=lowercase_ , out_dim=block_out_channels[0] , )
UpperCAmelCase_ : Dict = nn.ModuleList([] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList([] )
UpperCAmelCase_ : Tuple = None
# down
UpperCAmelCase_ : Optional[Any] = in_channels
for i, down_block_type in enumerate(lowercase_ ):
UpperCAmelCase_ : List[Any] = output_channel
UpperCAmelCase_ : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase_ : Union[str, Any] = i == len(lowercase_ ) - 1
UpperCAmelCase_ : Tuple = get_down_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowercase_ )
# mid
UpperCAmelCase_ : Dict = get_mid_block(
lowercase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowercase_ , add_downsample=lowercase_ , )
# up
UpperCAmelCase_ : Dict = list(reversed(lowercase_ ) )
UpperCAmelCase_ : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase_ : Optional[int] = out_channels
else:
UpperCAmelCase_ : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(lowercase_ ):
UpperCAmelCase_ : int = output_channel
UpperCAmelCase_ : Any = (
reversed_block_out_channels[i + 1] if i < len(lowercase_ ) - 1 else final_upsample_channels
)
UpperCAmelCase_ : Tuple = i == len(lowercase_ ) - 1
UpperCAmelCase_ : Dict = get_up_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowercase_ )
UpperCAmelCase_ : Dict = output_channel
# out
UpperCAmelCase_ : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
UpperCAmelCase_ : Dict = get_out_block(
out_block_type=lowercase_ , num_groups_out=lowercase_ , embed_dim=block_out_channels[0] , out_channels=lowercase_ , act_fn=lowercase_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = timestep
if not torch.is_tensor(lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : List[str] = timesteps[None].to(sample.device )
UpperCAmelCase_ : Optional[int] = self.time_proj(lowercase_ )
if self.config.use_timestep_embedding:
UpperCAmelCase_ : int = self.time_mlp(lowercase_ )
else:
UpperCAmelCase_ : Any = timestep_embed[..., None]
UpperCAmelCase_ : Optional[Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCAmelCase_ : Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCAmelCase_ : str = ()
for downsample_block in self.down_blocks:
UpperCAmelCase_ , UpperCAmelCase_ : Any = downsample_block(hidden_states=lowercase_ , temb=lowercase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase_ : Tuple = self.mid_block(lowercase_ , lowercase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCAmelCase_ : str = down_block_res_samples[-1:]
UpperCAmelCase_ : Optional[Any] = down_block_res_samples[:-1]
UpperCAmelCase_ : str = upsample_block(lowercase_ , res_hidden_states_tuple=lowercase_ , temb=lowercase_ )
# 5. post-process
if self.out_block:
UpperCAmelCase_ : List[str] = self.out_block(lowercase_ , lowercase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowercase_ )
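# --- Example (editor's addition) ---
# A hedged usage sketch for the forward pass above, assuming the upstream
# `diffusers.UNet1DModel` interface that this anonymized class mirrors (the
# shapes and defaults below are illustrative, not taken from this file):
#
#   >>> import torch
#   >>> from diffusers import UNet1DModel
#   >>> model = UNet1DModel()                         # defaults: 2 in/out channels
#   >>> sample = torch.randn(1, 2, 65536)             # (batch, channels, length)
#   >>> out = model(sample, timestep=torch.tensor([10])).sample
#   >>> out.shape                                     # prediction, same shape as input
#   torch.Size([1, 2, 65536])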
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
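# --- Example (editor's addition) ---
# A hedged sketch of what the lazy-module pattern above buys: the heavy
# modeling/configuration submodules are imported only when an attribute is
# first accessed, so `import transformers` itself stays cheap.
#
#   >>> import transformers                   # nothing UniSpeech-specific loads yet
#   >>> transformers.UniSpeechConfig          # first access triggers the real import
#   <class 'transformers.models.unispeech.configuration_unispeech.UniSpeechConfig'>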
| 23 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class A_ (lowerCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """owlvit_text_model"""
def __init__( self , lowercase_=4_9408 , lowercase_=512 , lowercase_=2048 , lowercase_=12 , lowercase_=8 , lowercase_=16 , lowercase_="quick_gelu" , lowercase_=1E-5 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=0 , lowercase_=4_9406 , lowercase_=4_9407 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase_ : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ : Tuple = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ (lowerCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = """owlvit_vision_model"""
def __init__( self , lowercase_=768 , lowercase_=3072 , lowercase_=12 , lowercase_=12 , lowercase_=3 , lowercase_=768 , lowercase_=32 , lowercase_="quick_gelu" , lowercase_=1E-5 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase_ : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ (lowerCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = """owlvit"""
SCREAMING_SNAKE_CASE__ : int = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=512 , lowercase_=2.65_92 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
if text_config is None:
UpperCAmelCase_ : Dict = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ : Union[str, Any] = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ : int = OwlViTTextConfig(**lowercase_ )
UpperCAmelCase_ : Optional[int] = OwlViTVisionConfig(**lowercase_ )
UpperCAmelCase_ : str = projection_dim
UpperCAmelCase_ : str = logit_scale_init_value
UpperCAmelCase_ : int = return_dict
UpperCAmelCase_ : Optional[Any] = 1.0
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase_ : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = text_config
UpperCAmelCase_ : Optional[int] = vision_config
return cls.from_dict(lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.text_config.to_dict()
UpperCAmelCase_ : List[Any] = self.vision_config.to_dict()
UpperCAmelCase_ : List[Any] = self.__class__.model_type
return output
class A_ (lowerCamelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-4
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : int = super().generate_dummy_inputs(
processor.tokenizer , batch_size=lowercase_ , seq_length=lowercase_ , framework=lowercase_ )
UpperCAmelCase_ : Dict = super().generate_dummy_inputs(
processor.image_processor , batch_size=lowercase_ , framework=lowercase_ )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 14
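# --- Example (editor's addition) ---
# Hedged usage sketch, assuming the upstream `transformers` names for the
# anonymized config classes above (the defaults shown come from the
# `__init__` signatures in this file):
#
#   >>> from transformers import OwlViTConfig
#   >>> config = OwlViTConfig()               # builds default text + vision sub-configs
#   >>> config.text_config.hidden_size
#   512
#   >>> config.vision_config.hidden_size
#   768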
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
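# Editor's note: with the default scale_factor=8, the helper above rounds each
# spatial size *up* to a whole number of 8**2 = 64-pixel latent blocks
# (upstream name `downscale_height_and_width`; anonymized to `__a` here), e.g.
#
#   >>> downscale_height_and_width(768, 768)   # 768 / 64 = 12 blocks exactly
#   (96, 96)
#   >>> downscale_height_and_width(769, 768)   # 769 needs a 13th block
#   (104, 96)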
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23 | 0 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __a ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase_ : Optional[Any] = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching, "os.path.join", __lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __a ( ):
assert _test_patching.open is open
UpperCAmelCase_ : Dict = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, "open", __lowerCamelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __a ( ):
UpperCAmelCase_ : List[str] = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching, "pandas.read_csv", __lowerCamelCase ):
pass
def __a ( ):
UpperCAmelCase_ : List[str] = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, "len", __lowerCamelCase ) is None
with patch_submodule(_test_patching, "len", __lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __a ( ):
UpperCAmelCase_ : List[str] = "__test_patch_submodule_start_and_stop_mock__"
UpperCAmelCase_ : int = patch_submodule(_test_patching, "open", __lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __a ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase_ : Optional[Any] = "__test_patch_submodule_successive_join__"
UpperCAmelCase_ : Dict = "__test_patch_submodule_successive_dirname__"
UpperCAmelCase_ : List[str] = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, "os.path.join", __lowerCamelCase ):
with patch_submodule(_test_patching, "os.rename", __lowerCamelCase ):
with patch_submodule(_test_patching, "os.path.dirname", __lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, "os.rename", __lowerCamelCase ):
with patch_submodule(_test_patching, "os.path.join", __lowerCamelCase ):
with patch_submodule(_test_patching, "os.path.dirname", __lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __a ( ):
UpperCAmelCase_ : str = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", __lowerCamelCase ):
pass
with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", __lowerCamelCase ):
pass
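# --- Example (editor's addition) ---
# The pattern the tests above exercise, in one hedged sketch (`target_module`
# and `my_mock` are placeholders, not names from this file):
#
#   >>> from datasets.utils.patching import patch_submodule
#   >>> with patch_submodule(target_module, "os.path.join", my_mock):
#   ...     ...   # every route to os.path.join through target_module yields my_mock
#   >>> # on exit, the original attributes are restored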
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """detr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = backbone_config.get("model_type" )
UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = num_queries
UpperCAmelCase_ : Union[str, Any] = d_model
UpperCAmelCase_ : str = encoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : Optional[Any] = init_xavier_std
UpperCAmelCase_ : Optional[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : int = auxiliary_loss
UpperCAmelCase_ : Optional[Any] = position_embedding_type
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Dict = dilation
# Hungarian matcher
UpperCAmelCase_ : Union[str, Any] = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : str = mask_loss_coefficient
UpperCAmelCase_ : Any = dice_loss_coefficient
UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase_ : List[str] = giou_loss_coefficient
UpperCAmelCase_ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
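# --- Example (editor's addition) ---
# Hedged usage sketch, assuming the upstream `transformers.DetrConfig` name for
# the anonymized class above (the values shown come from the defaults and the
# attribute_map defined in this file):
#
#   >>> from transformers import DetrConfig
#   >>> config = DetrConfig()
#   >>> config.num_attention_heads             # aliased to `encoder_attention_heads`
#   8
#   >>> config.hidden_size                     # aliased to `d_model`
#   256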
| 23 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_a = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_a = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_a = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_a = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=0.9 , lowercase_=3 , lowercase_=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase_ : str = [
meteor_score.single_meteor_score(
word_tokenize(lowercase_ ) , word_tokenize(lowercase_ ) , alpha=lowercase_ , beta=lowercase_ , gamma=lowercase_ )
for ref, pred in zip(lowercase_ , lowercase_ )
]
else:
UpperCAmelCase_ : int = [
meteor_score.single_meteor_score(lowercase_ , lowercase_ , alpha=lowercase_ , beta=lowercase_ , gamma=lowercase_ )
for ref, pred in zip(lowercase_ , lowercase_ )
]
return {"meteor": np.mean(lowercase_ )}
| 355 |
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
while number:
        # Speed is improved slightly by processing five digits at a time via the lookup table.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains exist, and every sequence eventually enters one of them.
# One ends in 89; seeding it via the member 58 minimises the number of
# iterations needed when the remaining members are checked.
# The other ends in 1 and contains only the single element 1.
# So 58 and 1 are marked up front.
# A flat array replaces the original dictionary to speed up lookups.
_a = [None] * 10_000_000
_a = True
_a = False
def __a ( __lowerCamelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) )
UpperCAmelCase_ : List[str] = number_chain
while number < 1000_0000:
UpperCAmelCase_ : List[Any] = number_chain
number *= 10
return number_chain
def __a ( __lowerCamelCase = 1000_0000 ):
for i in range(1, __lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
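    # --- Worked example (editor's addition; self-contained, since the helper
    # names above are anonymized in this dump) ---
    # Every squared-digit-sum chain ends in the cycle of 1 or the cycle of 89:
    def _squared_digit_sum(n: int) -> int:
        return sum(int(d) ** 2 for d in str(n))

    _n = 44
    while _n not in (1, 89):
        _n = _squared_digit_sum(_n)  # 44 -> 32 -> 13 -> 10 -> 1
    assert _n == 1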
| 23 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A_ (A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'poolformer'
def __init__( self , lowercase_=3 , lowercase_=16 , lowercase_=16 , lowercase_=3 , lowercase_=4.0 , lowercase_=[2, 2, 6, 2] , lowercase_=[64, 128, 320, 512] , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[2, 1, 1, 1] , lowercase_=4 , lowercase_=0.0 , lowercase_="gelu" , lowercase_=True , lowercase_=1E-5 , lowercase_=0.02 , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Union[str, Any] = stride
UpperCAmelCase_ : str = padding
UpperCAmelCase_ : str = pool_size
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : Any = mlp_ratio
UpperCAmelCase_ : Any = depths
UpperCAmelCase_ : str = patch_sizes
UpperCAmelCase_ : Optional[Any] = strides
UpperCAmelCase_ : List[str] = num_encoder_blocks
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = use_layer_scale
UpperCAmelCase_ : Dict = layer_scale_init_value
UpperCAmelCase_ : Tuple = initializer_range
super().__init__(**lowerCamelCase__ )
class A_ (A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 2E-3
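# --- Example (editor's addition) ---
# Hedged usage sketch, assuming the upstream `transformers.PoolFormerConfig`
# name for the anonymized class above (values come from the defaults in this
# file's `__init__` signature):
#
#   >>> from transformers import PoolFormerConfig
#   >>> config = PoolFormerConfig()
#   >>> config.hidden_sizes                    # one entry per encoder stage
#   [64, 128, 320, 512]
#   >>> config.depths
#   [2, 2, 6, 2]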
| 356 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    # Returns True if there is a path from the source to the sink in the residual
    # graph, filling parent[] along the way so the augmenting path can be rebuilt.
UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase )
UpperCAmelCase_ : Any = []
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Tuple = True
while queue:
UpperCAmelCase_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = u
return visited[t]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    # This array is filled by the BFS above to store the augmenting path
UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase ))
UpperCAmelCase_ : Any = 0
while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = float("Inf" )
UpperCAmelCase_ : Tuple = sink
while s != source:
            # Find the minimum residual capacity along the selected path
UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] )
UpperCAmelCase_ : Dict = parent[s]
max_flow += path_flow
UpperCAmelCase_ : Optional[Any] = sink
while v != source:
UpperCAmelCase_ : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ : Optional[int] = parent[v]
return max_flow
_a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a = 0, 5
print(ford_fulkerson(graph, source, sink))
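# --- Example (editor's addition): a self-contained Edmonds-Karp sketch ---
# Hedged: the names below are the editor's, not this module's. For the classic
# capacity matrix above, the expected maximum flow from node 0 to node 5 is 23;
# here a tiny 4-node network with known maximum flow 5 is checked instead.
from collections import deque


def _edmonds_karp(cap, s, t):
    n = len(cap)
    flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[s] = s
        q = deque([s])
        while q and parent[t] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and cap[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[t] == -1:  # no augmenting path left -> done
            return flow
        # bottleneck capacity along the path found
        bottleneck, v = float("inf"), t
        while v != s:
            bottleneck = min(bottleneck, cap[parent[v]][v])
            v = parent[v]
        # push the bottleneck flow and update residual capacities
        v = t
        while v != s:
            cap[parent[v]][v] -= bottleneck
            cap[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck


_tiny = [
    [0, 3, 2, 0],
    [0, 0, 1, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert _edmonds_karp(_tiny, 0, 3) == 5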
| 23 | 0 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ : int = tokenizer("Hello there" , return_tensors="np" ).input_ids
UpperCAmelCase_ : List[str] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
UpperCAmelCase_ : Any = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCAmelCase_ : Dict = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
UpperCAmelCase_ : Tuple = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1] ) ).mean()
UpperCAmelCase_ : str = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ : str = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 357 |
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def __a ( __lowerCamelCase, __lowerCamelCase ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
| 23 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
_a = 'us-east-1' # defaults region
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 42
SCREAMING_SNAKE_CASE__ : str = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5500,
}
SCREAMING_SNAKE_CASE__ : List[Any] = {**hyperparameters, """max_steps""": 1000}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
        return F"""{self.framework}-transformers-test"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Tuple = SageMakerTestEnvironment(framework=request.cls.framework )
| 358 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
        # If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
if value.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
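# --- Example (editor's addition) ---
# Hedged sketch of the padding path above via a concrete upstream subclass
# (this base class is not instantiated directly; the values below are
# illustrative assumptions):
#
#   >>> from transformers import Wav2Vec2FeatureExtractor
#   >>> fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   >>> batch = fe([[0.1, 0.2, 0.3], [0.4]], sampling_rate=16000,
#   ...            padding="longest", return_attention_mask=True)
#   >>> [len(x) for x in batch["input_values"]]   # both padded to the longest length
#   [3, 3]
#
# `batch["attention_mask"]` then marks real samples with 1 and padding with 0.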
| 23 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class A_ :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = {}
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = probability
def UpperCamelCase__ ( self ):
"""simple docstring"""
return list(self.connections )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Optional[int] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : Any = Counter(graph.get_nodes() )
UpperCAmelCase_ : Optional[int] = start
for _ in range(__lowerCamelCase ):
UpperCAmelCase_ : List[str] = graph.transition(__lowerCamelCase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
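# --- Example (editor's addition) ---
# Hedged usage sketch, assuming the upstream signature
# `get_transitions(start, transitions, steps)` for the anonymized walk above:
#
#   >>> transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#   ...                ("b", "a", 0.5), ("b", "b", 0.5)]
#   >>> visits = get_transitions("a", transitions, 1000)
#   >>> # the stationary distribution puts ~5x more weight on "a" than "b",
#   >>> # so visits["a"] should be roughly five times visits["b"]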
| 359 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 23 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """Wav2Vec2FeatureExtractor"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """AutoTokenizer"""
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = self.feature_extractor
UpperCAmelCase_ : Optional[int] = False
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
try:
return super().from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , lowerCamelCase_ , )
UpperCAmelCase_ : str = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
UpperCAmelCase_ : List[str] = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
return cls(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
def __call__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_ , **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("raw_speech" )
else:
UpperCAmelCase_ : Optional[int] = kwargs.pop("audio" , lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("sampling_rate" , lowerCamelCase_ )
UpperCAmelCase_ : Dict = kwargs.pop("text" , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
UpperCAmelCase_ : int = args[0]
UpperCAmelCase_ : Any = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
UpperCAmelCase_ : Optional[int] = self.feature_extractor(lowerCamelCase_ , *lowerCamelCase_ , sampling_rate=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None:
UpperCAmelCase_ : Optional[int] = self.tokenizer(lowerCamelCase_ , **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ : Any = encodings["input_ids"]
return inputs
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase_ , **lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("input_features" , lowerCamelCase_ )
UpperCAmelCase_ : Tuple = kwargs.pop("labels" , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
UpperCAmelCase_ : List[Any] = args[0]
UpperCAmelCase_ : str = args[1:]
if input_features is not None:
UpperCAmelCase_ : Dict = self.feature_extractor.pad(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
if labels is not None:
UpperCAmelCase_ : List[str] = self.tokenizer.pad(lowerCamelCase_ , **lowerCamelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase_ : Optional[Any] = labels["input_ids"]
return input_features
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@contextmanager
def UpperCamelCase__ ( self ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Union[str, Any] = self.tokenizer
yield
UpperCAmelCase_ : int = self.feature_extractor
UpperCAmelCase_ : Tuple = False
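# --- Example (editor's addition) ---
# Hedged usage sketch, assuming the upstream `transformers.Wav2Vec2Processor`
# name for the anonymized class above (`raw_audio` is a placeholder array):
#
#   >>> from transformers import Wav2Vec2Processor
#   >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   >>> inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
#   >>> inputs_with_labels = processor(audio=raw_audio, sampling_rate=16000,
#   ...                                text="HELLO WORLD", return_tensors="pt")
#
# Passing both `audio=` and `text=` attaches the tokenized transcript as
# `inputs_with_labels["labels"]`, replacing the deprecated
# `as_target_processor()` context manager implemented at the end of the class.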
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
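# Added illustration: `attribute_map` above lets generic config names resolve to
# the CTRL-specific ones, so e.g. `hidden_size` reads `n_embd`. A minimal check,
# assuming this module is importable:
#
# config = CTRLConfig(n_embd=1280, n_layer=48)
# assert config.hidden_size == 1280        # mapped to n_embd
# assert config.num_hidden_layers == 48    # mapped to n_layer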
| 23 | 0 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
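# Added reference sketch: a minimal Kruskal with union-find (path halving). The
# test below exercises the imported `kruskal`; the signature assumed here --
# kruskal(num_nodes, edges) with [u, v, weight] triples -- matches that call,
# but this function is a stand-in for illustration, not the imported one.
def kruskal_sketch(num_nodes: int, edges: list[list[int]]) -> list[list[int]]:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding the edge creates no cycle
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst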
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected_result = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected_result) == sorted(result)
| 361 |
"""simple docstring"""
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
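
def sylvester_iterative(number: int) -> int:
    # Added cross-check: the same recurrence a(n+1) = a(n)^2 - a(n) + 1 computed
    # iteratively; it should agree with the recursive version above, e.g.
    # all(sylvester(n) == sylvester_iterative(n) for n in range(1, 9)) is True.
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1
    return term
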
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    '''simple docstring'''
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        """simple docstring"""
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : Any = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : int = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
@require_tensorflow_probability
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_from_pretrained_identifier(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        """simple docstring"""
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        """simple docstring"""
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Make sure we have cached the model.
UpperCAmelCase_ : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCAmelCase_ : Tuple = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase_ : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
UpperCAmelCase_ : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
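# Added sketch: the registration round-trip exercised above, in miniature. The
# class names follow this file's NewModelConfig/TFNewModel pair; outside this
# test you would substitute your own config/model classes.
#
# AutoConfig.register("new-model", NewModelConfig)     # config type string -> config class
# TFAutoModel.register(NewModelConfig, TFNewModel)     # config class -> model class
# config = NewModelConfig()
# model = TFAutoModel.from_config(config)              # now resolves to TFNewModel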
| 362 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        """simple docstring"""
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
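# Added note: the `get_dummy_inputs` pattern above exists because the MPS
# backend does not accept device-bound `torch.Generator` objects. A hedged
# standalone sketch of the same branch:
#
# def make_generator(device: str, seed: int = 0) -> torch.Generator:
#     if str(device).startswith("mps"):
#         return torch.manual_seed(seed)  # returns the global (CPU) generator
#     return torch.Generator(device=device).manual_seed(seed)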
| 23 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(self, input_dims=128, targets_length=256, max_decoder_noise_time=2000.0, d_model=768, num_layers=12, num_heads=12, d_kv=64, d_ff=2048, dropout_rate=0.1, ):
        """simple docstring"""
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """simple docstring"""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        """simple docstring"""
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        """simple docstring"""
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        """simple docstring"""
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        """simple docstring"""
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        """simple docstring"""
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        """simple docstring"""
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, hidden_size, eps=1e-6):
        """simple docstring"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        """simple docstring"""
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    '''simple docstring'''

    def forward(self, input):
        """simple docstring"""
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, in_features, out_features):
        """simple docstring"""
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        """simple docstring"""
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
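# Added illustration: FiLM conditioning, as implemented by T5FiLMLayer above, is
# a per-feature affine transform predicted from the conditioning embedding. A
# self-contained shape check under the module's d_model * 4 convention:
#
# film = T5FiLMLayer(in_features=768 * 4, out_features=768)
# x = torch.randn(2, 10, 768)        # (batch, seq, d_model)
# cond = torch.randn(2, 1, 768 * 4)  # broadcasts over the sequence axis
# assert film(x, cond).shape == x.shape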
| 363 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
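# Added sketch of the npz round-trip the speaker-embeddings test relies on: a
# voice preset is just a dict of arrays, so it saves and reloads losslessly.
#
# import numpy as np
# preset = {"semantic_prompt": np.ones(35),
#           "coarse_prompt": np.ones((2, 35)),
#           "fine_prompt": np.ones((8, 35))}
# np.savez("file.npz", **preset)
# reloaded = np.load("file.npz")
# assert all(np.array_equal(preset[k], reloaded[k]) for k in preset)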
| 23 | 0 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
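
def get_set_bits_count_naive(number: int) -> int:
    # Added cross-check: the straightforward bit-by-bit count. Kernighan's loop
    # above clears the lowest set bit per iteration, so it runs once per set
    # bit; this version runs once per bit position instead.
    count = 0
    while number:
        count += number & 1
        number >>= 1
    return count
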
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
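# Added usage note (illustration; the script file name and paths are examples):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
# The timm model name must encode patch size and image size, since the code
# above parses them from deit_name[-6:-4] and deit_name[-3:].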
| 23 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
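# Added note on the slicing above: timm stores a single fused qkv projection,
# laid out as [query; key; value] along the output dimension. Sketch:
#
# hidden_size = config.hidden_size
# fused = torch.randn(3 * hidden_size, hidden_size)
# q = fused[:hidden_size]
# k = fused[hidden_size : 2 * hidden_size]
# v = fused[-hidden_size:]
# assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)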
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 365 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    '''simple docstring'''

    def test_downloads_are_flax_only(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_stable_diffusion_flax_tiny(self):
        """simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
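# Added sketch of the replicate/shard pattern used throughout these tests:
# parameters are copied to every device, while inputs gain a leading device
# axis before the pmapped (jit=True) call.
#
# params = replicate(params)                       # same weights on each device
# prompt_ids = pipeline.prepare_inputs(prompts)    # (num_samples, seq_len)
# prompt_ids = shard(prompt_ids)                   # (devices, per_device, seq_len)
# rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
# images = pipeline(prompt_ids, params, rng, jit=True).images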
| 23 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
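# --- Added illustration (not in the original script) ---
# Each recursion level replaces one triangle with three smaller ones, so a
# drawing of depth d ends in 3**d leaf triangles.
def leaf_triangle_count(depth: int) -> int:
    return 3**depth
assert [leaf_triangle_count(d) for d in range(4)] == [1, 3, 9, 27]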
| 366 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
_a = BidirectionalAStar(init, goal)
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class A_ (PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = """ctrl"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Any = n_positions
UpperCAmelCase_ : Dict = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[Any] = dff
UpperCAmelCase_ : Union[str, Any] = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Tuple = use_cache
super().__init__(**lowercase_ )
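# --- Added illustration (assumes the upstream `transformers` package, whose
# CTRLConfig this snippet mirrors) ---
# The attribute_map above lets generic code read `hidden_size` even though the
# value is stored under `n_embd`.
def _ctrl_config_demo():
    from transformers import CTRLConfig
    cfg = CTRLConfig(n_embd=64, n_layer=2, n_head=2)
    assert cfg.hidden_size == 64
    assert cfg.num_hidden_layers == 2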
| 367 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (SchedulerCommonTest ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
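# --- Added illustration (not part of the test suite) ---
# Minimal end-to-end sketch of the PRK/PLMS split exercised by full_loop()
# above, with random tensors standing in for a trained model.
def _pndm_demo():
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = torch.randn_like(sample)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample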
| 23 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger()
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = 42
    SCREAMING_SNAKE_CASE__ : Dict = field(default_factory=list )
    SCREAMING_SNAKE_CASE__ : str = field(default_factory=list )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__ , nn.Convad ) or isinstance(UpperCamelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self , lowercase_ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 42
SCREAMING_SNAKE_CASE__ : int = 42
SCREAMING_SNAKE_CASE__ : Tuple = 1
    SCREAMING_SNAKE_CASE__ : Dict = field(default_factory=list )
    SCREAMING_SNAKE_CASE__ : Dict = field(default_factory=list )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
def __call__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Tracker(self.dest )(UpperCamelCase__ ).parametrized
UpperCAmelCase_ : Optional[Any] = Tracker(self.src )(UpperCamelCase__ ).parametrized
UpperCAmelCase_ : Optional[Any] = list(filter(lambda lowercase_ : type(UpperCamelCase__ ) not in self.src_skip , UpperCamelCase__ ) )
UpperCAmelCase_ : Optional[Any] = list(filter(lambda lowercase_ : type(UpperCamelCase__ ) not in self.dest_skip , UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
raise Exception(
F"""Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while"""
F""" destination module has {len(UpperCamelCase__ )}.""" )
for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
class A_ (nn.Module ):
'''simple docstring'''
def __init__( self , lowercase_ ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Tuple = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F"""Unexpected layer name {k}"""
UpperCAmelCase_ : str = len(UpperCamelCase__ ) + 1
feature_blocks.append((F"""res{block_index}""", v) )
UpperCAmelCase_ : Optional[int] = nn.ModuleDict(UpperCamelCase__ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return get_trunk_forward_outputs(
UpperCamelCase__ , out_feat_keys=UpperCamelCase__ , feature_blocks=self._feature_blocks , )
class A_ (dict ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , lowercase_ ):
"""simple docstring"""
if x not in self:
UpperCAmelCase_ : Dict = self.convert_name_to_timm(UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = partial(lambda: (timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval(), None) )
else:
UpperCAmelCase_ : Union[str, Any] = super().__getitem__(UpperCamelCase__ )
return val
class A_ (dict ):
'''simple docstring'''
def __getitem__( self , lowercase_ ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ : List[Any] = RegNetModel
else:
UpperCAmelCase_ : Optional[Any] = RegNetForImageClassification
return val
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for from_key, to_key in keys:
UpperCAmelCase_ : int = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = True, ):
print(f"""Converting {name}...""" )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ : int = from_model_func()
UpperCAmelCase_ : Optional[int] = our_model_func(__UpperCamelCase ).eval()
UpperCAmelCase_ : List[str] = ModuleTransfer(src=__UpperCamelCase, dest=__UpperCamelCase, raise_if_mismatch=__UpperCamelCase )
UpperCAmelCase_ : int = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCamelCase )
if from_state_dict is not None:
UpperCAmelCase_ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Any = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
UpperCAmelCase_ : Tuple = manually_copy_vissl_head(__UpperCamelCase, our_model.state_dict(), __UpperCamelCase )
our_model.load_state_dict(__UpperCamelCase )
UpperCAmelCase_ : List[str] = our_model(__UpperCamelCase, output_hidden_states=__UpperCamelCase )
UpperCAmelCase_ : str = (
our_outputs.logits if isinstance(__UpperCamelCase, __UpperCamelCase ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ : List[str] = from_model(__UpperCamelCase )
UpperCAmelCase_ : Tuple = from_output[-1] if type(__UpperCamelCase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Optional[int] = our_outputs.hidden_states[-1]
assert torch.allclose(__UpperCamelCase, __UpperCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=__UpperCamelCase, )
UpperCAmelCase_ : str = 224 if "seer" not in name else 384
# we can use the convnext one
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=__UpperCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=__UpperCamelCase, )
print(f"""Pushed {name}""" )
def __a ( __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = True ):
UpperCAmelCase_ : Union[str, Any] = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[str] = 1000
UpperCAmelCase_ : List[str] = (1, num_labels)
UpperCAmelCase_ : Dict = "huggingface/label-files"
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : Optional[int] = json.load(open(cached_download(hf_hub_url(__UpperCamelCase, __UpperCamelCase, repo_type="dataset" ) ), "r" ) )
UpperCAmelCase_ : Dict = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = idalabel
UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Dict = partial(__UpperCamelCase, num_labels=__UpperCamelCase, idalabel=__UpperCamelCase, labelaid=__UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 1_1110, 2_8280], groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 1_1110, 2_8280], groups_width=1010 ),
}
UpperCAmelCase_ : str = NameToOurModelFuncMap()
UpperCAmelCase_ : Tuple = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowerCamelCase, __lowerCamelCase ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ : Optional[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase, model_dir=str(__UpperCamelCase ), map_location="cpu" )
UpperCAmelCase_ : int = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ : Union[str, Any] = files["classy_state_dict"]["base_model"]["model"]
UpperCAmelCase_ : Optional[int] = model_state_dict["trunk"]
model.load_state_dict(__UpperCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ : Union[str, Any] = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
UpperCAmelCase_ : List[Any] = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
UpperCAmelCase_ : Tuple = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
UpperCAmelCase_ : Any = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
# IN1K finetuned
UpperCAmelCase_ : Optional[Any] = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
UpperCAmelCase_ : Dict = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
UpperCAmelCase_ : int = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
UpperCAmelCase_ : List[Any] = partial(
__UpperCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
__UpperCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], __UpperCamelCase, __UpperCamelCase, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__UpperCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, )
return config, expected_shape
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_a = parser.parse_args()
_a = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
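# --- Added illustration (pure torch; mirrors the forward-hook idea behind the
# Tracker dataclass above) ---
def _trace_leaves(model: nn.Module, x: Tensor) -> list:
    traced = []
    handles = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0  # leaf modules only
    ]
    model(x)
    for handle in handles:
        handle.remove()
    return traced
_demo_net = nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU())
assert len(_trace_leaves(_demo_net, torch.randn(1, 3, 8, 8))) == 2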
| 368 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
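# --- Added illustration ---
# _match slides the rule tuple as a regex window over a flattened parameter
# name, so a spec applies no matter which layer-index prefix precedes it.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_proj", "kernel"))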
| 23 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def __a ( ) -> str:
from torch.utils.cpp_extension import load
UpperCAmelCase_ : Union[str, Any] = Path(snake_case_ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
UpperCAmelCase_ : List[str] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu", "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda", "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention", snake_case_, with_cuda=snake_case_, extra_include_paths=[str(snake_case_ )], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
], )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 369 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
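# --- Added illustration (standalone analogue of the DocTestSuite machinery
# exercised above): run one docstring example through doctest directly. ---
def _doctest_demo():
    def _demo_add(a, b):
        """
        >>> _demo_add(1, 2)
        3
        """
        return a + b
    doctest.run_docstring_examples(_demo_add, {"_demo_add": _demo_add}, verbose=False)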
| 23 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_a = ''
_a = ''
_a = ''
_a = 1 # (0 is vertical, 1 is horizontal)
def __a ( ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_dataset(a__, a__ )
print("Processing..." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = update_image_and_anno(a__, a__, a__ )
for index, image in enumerate(a__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase_ : Tuple = random_chars(32 )
UpperCAmelCase_ : Any = paths[index].split(os.sep )[-1].rsplit(".", 1 )[0]
UpperCAmelCase_ : Any = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""", a__, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(a__ )} with {file_name}""" )
UpperCAmelCase_ : Union[str, Any] = []
for anno in new_annos[index]:
UpperCAmelCase_ : Optional[Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a__ )
with open(f"""/{file_root}.txt""", "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = []
for label_file in glob.glob(os.path.join(a__, "*.txt" ) ):
UpperCAmelCase_ : List[str] = label_file.split(os.sep )[-1].rsplit(".", 1 )[0]
with open(a__ ) as in_file:
UpperCAmelCase_ : Union[str, Any] = in_file.readlines()
UpperCAmelCase_ : Optional[int] = os.path.join(a__, f"""{label_name}.jpg""" )
UpperCAmelCase_ : Any = []
for obj_list in obj_lists:
UpperCAmelCase_ : Optional[Any] = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a__ )
labels.append(a__ )
return img_paths, labels
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 ):
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Tuple = []
for idx in range(len(a__ ) ):
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[Any] = img_list[idx]
path_list.append(a__ )
UpperCAmelCase_ : Optional[Any] = anno_list[idx]
UpperCAmelCase_ : Union[str, Any] = cva.imread(a__ )
if flip_type == 1:
UpperCAmelCase_ : List[str] = cva.flip(a__, a__ )
for bbox in img_annos:
UpperCAmelCase_ : Any = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase_ : List[str] = cva.flip(a__, a__ )
for bbox in img_annos:
UpperCAmelCase_ : int = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(a__ )
new_imgs_list.append(a__ )
return new_imgs_list, new_annos_lists, path_list
def __a ( __lowerCamelCase = 32 ):
assert number_char > 1, "The number of character should greater than 1"
UpperCAmelCase_ : int = ascii_lowercase + digits
return "".join(random.choice(a__ ) for _ in range(a__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
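# --- Added numeric check (not in the original script) ---
# The YOLO-format coordinate flip used in update_image_and_anno(): a
# horizontal flip mirrors the normalized x-center, a vertical flip the y-center.
_box = [0, 0.25, 0.75, 0.10, 0.20]  # class, x_center, y_center, width, height
assert 1 - _box[1] == 0.75  # horizontal flip (flip_type == 1)
assert 1 - _box[2] == 0.25  # vertical flip (flip_type == 0)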
| 370 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
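# --- Added worked example (assumes numpy and scikit-learn are installed) ---
import numpy as np
_demo_preds = np.array([1, 0, 1, 1])
_demo_labels = np.array([1, 0, 0, 1])
assert simple_accuracy(_demo_preds, _demo_labels) == 0.75
assert glue_compute_metrics("mrpc", _demo_preds, _demo_labels)["acc"] == 0.75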
| 23 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class A_ (TaskTemplate ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
SCREAMING_SNAKE_CASE__ : str = field(default="""summarization""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
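# --- Added illustration (assumes a `datasets` release that still ships task
# templates, which this snippet mirrors) ---
def _summarization_template_demo():
    from datasets.tasks import Summarization
    tmpl = Summarization(text_column="article", summary_column="highlights")
    assert tmpl.column_mapping == {"article": "text", "highlights": "summary"}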
| 371 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (PreTrainedTokenizer ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ):
"""simple docstring"""
super().__init__(
unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for s in text:
            char_tokens.extend(s )
return char_tokens
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
return (vocab_file,)
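# --- Added stand-in (not part of the tokenizer class) ---
# MGP-STR tokenization is character level: every character of the input string
# becomes its own token before vocab lookup.
def _char_tokenize_demo(text: str) -> list:
    return list(text)
assert _char_tokenize_demo("str") == ["s", "t", "r"]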
| 23 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
_a = parser.parse_args()
if args.model_type == "roberta":
_a = RobertaForMaskedLM.from_pretrained(args.model_name)
_a = "roberta"
elif args.model_type == "gpt2":
_a = GPTaLMHeadModel.from_pretrained(args.model_name)
_a = "transformer"
_a = model.state_dict()
_a = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_a = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_a = f"""{prefix}.embeddings.{w}.weight"""
_a = state_dict[param_name]
for w in ["weight", "bias"]:
_a = f"""{prefix}.embeddings.LayerNorm.{w}"""
_a = state_dict[param_name]
# Transformer Blocks #
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_a = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_a = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_a = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_a = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[f"""lm_head.dense.{w}"""]
_a = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_a = state_dict[f"""{prefix}.ln_f.{w}"""]
_a = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
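# --- Added illustration (not in the original script) ---
# The loop above copies teacher layers [0, 2, 4, 7, 9, 11] into consecutive
# student slots; std_idx tracks the student-side index.
_teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
assert _teacher_to_student[7] == 3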
| 350 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x):
    return x[0]
def get_frequency_order(message):
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message):
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
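# --- Added usage example ---
# english_freq_match_score is bounded by construction: at most 6 common plus
# 6 uncommon letter hits.
_demo_score = english_freq_match_score("Hello, this is a normal English sentence.")
assert 0 <= _demo_score <= 12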
| 23 | 0 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_a = 'bert-base-cased'
_a = 'google/pegasus-xsum'
_a = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_a = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_a = 'patrickvonplaten/t5-tiny-random'
_a = 'sshleifer/bart-tiny-random'
_a = 'sshleifer/tiny-mbart'
_a = 'sshleifer/tiny-marian-en-de'
def __a ( __lowerCamelCase, __lowerCamelCase ):
    UpperCAmelCase_ : List[Any] = "\n".join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open("w" ).writelines(lowerCAmelCase__ )
def __a ( __lowerCamelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__, f"""{split}.source""" ), lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__, f"""{split}.target""" ), lowerCAmelCase__ )
return tmp_dir
class A_ (TestCasePlus ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
UpperCAmelCase_ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        UpperCAmelCase_ : Union[str, Any] = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        UpperCAmelCase_ : str = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
        UpperCAmelCase_ , UpperCAmelCase_ : Any = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
UpperCAmelCase_ : List[Any] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="train" , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
UpperCAmelCase_ : Dict = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ : Dict = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def UpperCamelCase__ ( self , tok ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(tok )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path="train" , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        orig_examples = tmp_dir.joinpath("train.source" ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir )
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples ) < len(orig_examples )
        assert len(packed_examples ) == 1
        assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64 )
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple )
        batch_sizes = [len(x ) for x in batch_sampler]
        assert len(set(batch_sizes ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes ) == len(ds )  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape )
            num_src_per_batch.append(num_src_tokens )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(batch )
        assert num_src_per_batch[0] == max(num_src_per_batch )
        if failures:
            raise AssertionError(F"""too many tokens in {len(failures )} batches""" )
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        ds, _, tokenizer = self._get_dataset(max_len=512 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader , k="input_ids" ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl , k="labels" ) ) < sum(count_pad_tokens(naive_dl , k="labels" ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=1000 , max_len=128 ):
        """simple docstring"""
        if os.getenv("USE_REAL_DATA" , False ):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath("train.len" ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path="train" , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        ids2 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert ids1.intersection(ids2 ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def UpperCamelCase__ ( self , tok_name ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class A_ (TestCasePlus ):
'''simple docstring'''
    def setup( self ):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        """simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args )
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args )
        entropy_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_args )
| 23 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint, config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path, output_path, ):
    # Only the v1 inference config is supported here.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE to.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
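# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers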
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
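# Illustrative note (added): with the lazy module installed in sys.modules, a statement
# like `from transformers.models.unispeech import UniSpeechModel` only triggers the
# heavy torch-dependent import on first attribute access, keeping package import fast.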
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot( lst ):
    return choice(lst )
def kth_number( lst, k ):
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big, k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k )
if __name__ == "__main__":
import doctest
doctest.testmod()
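    # Example (added for illustration): the 3rd smallest element of [2, 1, 3, 4, 5].
    print(kth_number([2, 1, 3, 4, 5], 3))  # -> 3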
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
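# Worked example (added for clarity): with the default scale_factor of 8, a 768x768
# request yields 768 // 8**2 = 12 latent cells per side, returned as 12 * 8 = 96;
# the MoVQ decoder's 8x upsampling then restores 96 * 8 = 768 pixels. A 770x770
# request rounds up to 13 cells (returned as 104, i.e. 832 output pixels).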
class A_ (DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler , movq , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """simple docstring"""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 23 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class A_ (PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ):
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang ):
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
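# A minimal usage sketch (added; illustrative, not executed here): the fast MBart
# tokenizer appends "</s> <lang_code>" to both source and target sequences.
#     tok = A_.from_pretrained("facebook/mbart-large-en-ro")
#     tok.src_lang = "en_XX"
#     batch = tok(["Hello world"], return_tensors="pt")
#     # input_ids end with the eos token id followed by the en_XX language code id.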
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model

    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        """simple docstring"""
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1E-5

    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
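# Illustrative usage (added; not part of the module):
#     config = DetrConfig(num_queries=50)
#     assert config.num_attention_heads == config.encoder_attention_heads  # via attribute_map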
| 23 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_a = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_text_model"""
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1E-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_vision_model"""

    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=1E-10 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip"""
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.65_92 , image_text_hidden_size=256 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
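# Illustrative usage (added; not part of the module):
#     text_cfg = BlipTextConfig()
#     vision_cfg = BlipVisionConfig()
#     cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.text_config.encoder_hidden_size == cfg.vision_config.hidden_size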
| 355 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 1000_0000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
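# Worked example (added for clarity): next_number(44) = 4**2 + 4**2 = 32, and the
# chain 44 -> 32 -> 13 -> 10 -> 1 sticks at 1, while 85 -> 89 -> 145 -> ... loops
# back to 89; every starting number ends in one of these two cycles, and solution()
# counts the chains that arrive at 89.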
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol ):
    '''simple docstring'''
    def process( self , sample ):
        """simple docstring"""
        return 0.0
def get_bounds( fft_results, samplerate ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type, samplerate ):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate )
    plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type, samplerate ):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi, 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(fft_phase, -2 * pi ) )
    plt.show()
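# A minimal usage sketch (added; illustrative): any object exposing a
# `process(sample) -> float` method satisfies the FilterType protocol; an identity
# filter shows a flat 0 dB magnitude response.
#
#     class IdentityFilter:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(IdentityFilter(), 48000)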
| 356 |
"""simple docstring"""
def bfs( graph, s, t, parent ):
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson( graph, source, sink ):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
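# Worked check (added for clarity): this is the classic CLRS example network; the
# printed maximum flow from node 0 to node 5 is 23.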
| 23 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_a = logging.get_logger(__name__)
class A_ (DonutImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 357 |
"""simple docstring"""
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy( preds, labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
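# Illustrative only (added; mirrors the docstring example above):
#     xnli_metric = datasets.load_metric("xnli")
#     print(xnli_metric.compute(predictions=[0, 1], references=[0, 1]))  # {'accuracy': 1.0}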
| 23 | 0 |
"""simple docstring"""
class A_ :
'''simple docstring'''
    def __init__( self , array ):
        """simple docstring"""
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end ):
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum ):
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
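    # Example (added for illustration): prefix sums of [1, 2, 3] are [1, 3, 6].
    prefix_sum = A_([1, 2, 3])
    print(prefix_sum.get_sum(1, 2))      # 2 + 3 = 5
    print(prefix_sum.contains_sum(5))    # True: the contiguous slice [2, 3] sums to 5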
| 358 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (FeatureExtractionMixin ):
    '''simple docstring'''
    def __init__( self , feature_size , sampling_rate , padding_value , **kwargs ):
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
                if value.dtype is np.dtype(np.float64 ):
                    # store float features as float32
                    UpperCAmelCase_ : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
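# A standalone sketch of the right-padding logic implemented in `_pad` above
# (`pad_batch` and its signature are illustrative, not part of the original API):
import numpy as np

def pad_batch(inputs, padding_value=0.0):
    # right-pad 1-D feature arrays to the longest length in the batch
    max_length = max(len(x) for x in inputs)
    attention_mask = np.stack(
        [np.pad(np.ones(len(x), dtype=np.int32), (0, max_length - len(x))) for x in inputs]
    )
    padded = np.stack(
        [np.pad(x, (0, max_length - len(x)), "constant", constant_values=padding_value) for x in inputs]
    )
    return padded, attention_mask

values, mask = pad_batch([np.array([1.0, 2.0, 3.0]), np.array([4.0])])
assert values.shape == (2, 3) and mask.tolist() == [[1, 1, 1], [1, 0, 0]]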
| 23 | 0 |
import math
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = [True] * n
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = True
for i in range(3, int(n**0.5 + 1 ), 2 ):
UpperCAmelCase_ : Dict = i * 2
while index < n:
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : str = index + i
UpperCAmelCase_ : List[Any] = [2]
for i in range(3, __lowerCamelCase, 2 ):
if is_prime[i]:
primes.append(__lowerCamelCase )
return primes
def __a ( __lowerCamelCase = 9999_6666_3333 ):
UpperCAmelCase_ : Optional[Any] = math.floor(math.sqrt(__lowerCamelCase ) ) + 100
UpperCAmelCase_ : List[Any] = prime_sieve(__lowerCamelCase )
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Any = primes[prime_index]
while (last_prime**2) <= limit:
UpperCAmelCase_ : Tuple = primes[prime_index + 1]
UpperCAmelCase_ : Tuple = last_prime**2
UpperCAmelCase_ : str = next_prime**2
# Get numbers divisible by lps(current)
UpperCAmelCase_ : Any = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
UpperCAmelCase_ : Tuple = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
UpperCAmelCase_ : str = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
UpperCAmelCase_ : List[str] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
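# The sieve above considers odd candidates only; a readable variant with the
# same output (the name `prime_sieve` matches how the helper is called above):
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(n**0.5) + 1, 2):
        if is_prime[i]:
            for multiple in range(i * i, n, 2 * i):
                is_prime[multiple] = False
    return [2] + [i for i in range(3, n, 2) if is_prime[i]]

assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]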
| 359 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
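# The test above checks that an optimizer wrapped by `accelerator.prepare`
# survives a pickle round trip; the same pattern outside unittest, sketched
# for a plain CPU run:
import pickle

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accelerator = Accelerator()
optimizer = accelerator.prepare(optimizer)
restored = pickle.loads(pickle.dumps(optimizer))  # should not raise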
| 23 | 0 |
"""simple docstring"""
_a = 256
# Modulus to hash a string
_a = 1_000_003
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = len(__lowerCamelCase )
UpperCAmelCase_ : Any = len(__lowerCamelCase )
if p_len > t_len:
return False
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[str] = 1
# Calculating the hash of pattern and substring of text
for i in range(__lowerCamelCase ):
UpperCAmelCase_ : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
UpperCAmelCase_ : List[str] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
UpperCAmelCase_ : Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
UpperCAmelCase_ : Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __a ( ):
UpperCAmelCase_ : Optional[Any] = '''abc1abc12'''
UpperCAmelCase_ : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
UpperCAmelCase_ : Tuple = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(__lowerCamelCase, __lowerCamelCase ) and not rabin_karp(__lowerCamelCase, __lowerCamelCase )
# Test 2)
UpperCAmelCase_ : int = '''ABABX'''
UpperCAmelCase_ : Dict = '''ABABZABABYABABX'''
assert rabin_karp(__lowerCamelCase, __lowerCamelCase )
# Test 3)
UpperCAmelCase_ : int = '''AAAB'''
UpperCAmelCase_ : List[Any] = '''ABAAAAAB'''
assert rabin_karp(__lowerCamelCase, __lowerCamelCase )
# Test 4)
UpperCAmelCase_ : Optional[int] = '''abcdabcy'''
UpperCAmelCase_ : List[str] = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(__lowerCamelCase, __lowerCamelCase )
# Test 5)
UpperCAmelCase_ : int = '''Lü'''
UpperCAmelCase_ : int = '''Lüsai'''
assert rabin_karp(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : str = '''Lue'''
assert not rabin_karp(__lowerCamelCase, __lowerCamelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
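# A tiny standalone illustration of the O(1) rolling-hash update used above:
# drop the leading character's contribution, shift, and append the next one.
base, mod = 256, 1_000_003
h = (ord("a") * base + ord("b")) % mod                 # hash of "ab"
power = base % mod                                     # base ** (window_len - 1)
h = ((h - ord("a") * power) * base + ord("c")) % mod   # slide the window to "bc"
assert h == (ord("b") * base + ord("c")) % mod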
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
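# Assuming this mirrors `transformers.CTRLConfig`, the attribute map above lets
# the canonical config names resolve to the CTRL-specific ones (a usage sketch):
from transformers import CTRLConfig

config = CTRLConfig(n_embd=1280, n_layer=48, n_head=16)
assert config.hidden_size == config.n_embd == 1280
assert config.num_hidden_layers == config.n_layer == 48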
| 23 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""pixel_values"""]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = True , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = None , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase_ : List[str] = size if size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : Tuple = get_size_dict(__a )
UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : Optional[int] = get_size_dict(__a , default_to_square=__a , param_name="crop_size" )
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : str = do_rescale
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : List[str] = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : int = resample
UpperCAmelCase_ : int = rescale_factor
UpperCAmelCase_ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_size_dict(__a )
if "shortest_edge" in size:
UpperCAmelCase_ : str = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_ : Dict = (size["height"], size["width"])
else:
raise ValueError(F"""Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}""" )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ ):
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Optional[int] = get_size_dict(__a , param_name="crop_size" , default_to_square=__a )
UpperCAmelCase_ : Any = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(__a )
if not is_batched(__a ):
UpperCAmelCase_ : str = [images]
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Dict = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase_ : Dict = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
UpperCAmelCase_ : int = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ : Dict = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase_ : Optional[int] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase_ : Optional[int] = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ : List[Any] = {"pixel_values": images}
        return BatchFeature(data=__a , tensor_type=__a )
 | 361 |
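# A minimal standalone sketch of the center-crop -> rescale -> normalize ->
# channel-first chain from the image processor above (resize omitted; all
# names and constants below are illustrative):
import numpy as np

def preprocess_one(image, crop=224, scale=1 / 255, mean=0.5, std=0.5):
    h, w, _ = image.shape
    top, left = (h - crop) // 2, (w - crop) // 2
    image = image[top : top + crop, left : left + crop]  # center crop
    image = image.astype(np.float32) * scale             # rescale to [0, 1]
    image = (image - mean) / std                         # normalize
    return image.transpose(2, 0, 1)                      # HWC -> CHW

out = preprocess_one(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
assert out.shape == (3, 224, 224)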
"""simple docstring"""
def __a ( __lowerCamelCase ):
    assert isinstance(number, int ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 | 0 |
import math
import unittest
def __a ( __lowerCamelCase ):
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
            is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
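# Why checking 6k +/- 1 suffices above: any n = 6k + r with r in {0, 2, 3, 4}
# is divisible by 2 or 3, so every prime greater than 3 has r in {1, 5}.
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29])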
| 362 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23 | 0 |
"""simple docstring"""
import sys
_a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = 1
for digit in s:
product *= int(SCREAMING_SNAKE_CASE_ )
return product
def __a ( __lowerCamelCase = N ):
UpperCAmelCase_ : Dict = -sys.maxsize - 1
UpperCAmelCase_ : str = n[:13]
UpperCAmelCase_ : Any = 13
while cur_index < len(SCREAMING_SNAKE_CASE_ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
UpperCAmelCase_ : int = substr[1:] + n[cur_index]
cur_index += 1
else:
UpperCAmelCase_ : Optional[Any] = max(SCREAMING_SNAKE_CASE_, str_eval(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_ : int = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 363 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 23 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_a = direct_transformers_import(PATH_TO_TRANSFORMERS)
_a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_a = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
    # not used in modeling files, but it's important information
    'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
    # used during training (even though we don't have a training script for these models yet)
    'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ : Dict = True
# Deal with multi-line cases
elif (
re.search(
rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""", snake_case__, )
is not None
):
UpperCAmelCase_ : Dict = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ : Tuple = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ : Tuple = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
UpperCAmelCase_ : Union[str, Any] = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
UpperCAmelCase_ : int = True
if not attribute_used:
UpperCAmelCase_ : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ : Dict = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ : List[str] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ : Dict = True
elif attribute.endswith("_token_id" ):
UpperCAmelCase_ : int = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
UpperCAmelCase_ : Tuple = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ : str = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
UpperCAmelCase_ : int = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ : Tuple = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ : List[str] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ : Optional[int] = inspect.getsourcefile(snake_case__ )
UpperCAmelCase_ : List[str] = os.path.dirname(snake_case__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ : List[str] = [os.path.join(snake_case__, snake_case__ ) for fn in os.listdir(snake_case__ ) if fn.startswith("modeling_" )]
# Get the source code strings
UpperCAmelCase_ : Optional[Any] = []
for path in modeling_paths:
if os.path.isfile(snake_case__ ):
with open(snake_case__ ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ : List[Any] = []
for config_param, default_value in zip(snake_case__, snake_case__ ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ : Tuple = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(snake_case__, snake_case__, snake_case__, snake_case__ ):
unused_attributes.append(attributes[0] )
return sorted(snake_case__ )
def __a ( ):
UpperCAmelCase_ : List[str] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ : List[str] = [
cls
for name, cls in inspect.getmembers(
            inspect.getmodule(_config_class ), lambda x : inspect.isclass(x )
            and issubclass(x , PretrainedConfig )
            and inspect.getmodule(x ) == inspect.getmodule(_config_class ), )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ : List[Any] = check_config_attributes_being_used(snake_case__ )
if len(snake_case__ ) > 0:
UpperCAmelCase_ : List[Any] = unused_attributes
if len(snake_case__ ) > 0:
UpperCAmelCase_ : Optional[Any] = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += f"""{name}: {attributes}\n"""
raise ValueError(snake_case__ )
if __name__ == "__main__":
check_config_attributes()
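# A small demonstration of the multi-line `getattr` regex used in
# `check_attribute_being_used` above (the attribute name is chosen for
# illustration):
import re

source = 'val = getattr(\n    self.config, "hidden_size", 768\n)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(pattern, source) is not None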
| 364 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
    UpperCAmelCase_ : List[str] = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
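# The fused-qkv split performed by `read_in_q_k_v` above, shown on a toy
# tensor: timm stores one (3 * hidden, hidden) matrix that is sliced row-wise
# into query, key and value weights.
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)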
| 23 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=False , lowercase_=True , lowercase_="None" , lowercase_=3 , lowercase_=4 , lowercase_=None , ):
"""simple docstring"""
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : List[str] = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = num_labels
UpperCAmelCase_ : List[str] = num_choices
UpperCAmelCase_ : List[Any] = relative_attention
UpperCAmelCase_ : Tuple = position_biased_input
UpperCAmelCase_ : str = pos_att_type
UpperCAmelCase_ : Optional[int] = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.get_config()
UpperCAmelCase_ : List[str] = 300
return config
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = DebertaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )[0]
UpperCAmelCase_ : Any = model(lowercase_ , token_type_ids=lowercase_ )[0]
UpperCAmelCase_ : Optional[int] = model(lowercase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Tuple = DebertaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Any = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = DebertaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs
UpperCAmelCase_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ (_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = DebertaModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = DebertaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = DebertaModel.from_pretrained("microsoft/deberta-base" )
UpperCAmelCase_ : Dict = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Optional[int] = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 365 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
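        # with jit=True the call is pmapped, so axis 0 of the output is the device
        # count and axis 1 the per-device batch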
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
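        # Flax schedulers are stateless: create_state() returns the explicit state
        # that has to be threaded through the pipeline parameters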
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 23 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __a ( __lowerCamelCase = 8 ):
UpperCAmelCase_ : Tuple = ascii_letters + digits + punctuation
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def __a ( __lowerCamelCase, __lowerCamelCase ):
i -= len(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = i // 3
UpperCAmelCase_ : List[str] = i % 3
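    # split the remaining length into three roughly equal parts (letters, digits,
    # punctuation); the integer remainder goes to the letters part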
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCAmelCase_ : Any = (
chars_incl
+ random(__lowerCamelCase, quotient + remainder )
+ random(__lowerCamelCase, __lowerCamelCase )
+ random(__lowerCamelCase, __lowerCamelCase )
)
UpperCAmelCase_ : str = list(__lowerCamelCase )
shuffle(__lowerCamelCase )
return "".join(__lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def __a ( __lowerCamelCase, __lowerCamelCase ):
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def __a ( __lowerCamelCase, __lowerCamelCase ):
    # a minimal sketch of the intended helper (assumed behaviour): pick `i`
    # random letters from the given charset, mirroring the generalised helper above
    return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def __a ( __lowerCamelCase, __lowerCamelCase ):
    # assumed behaviour: pick `i` random digits from the given charset
    return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def __a ( __lowerCamelCase, __lowerCamelCase ):
    # assumed behaviour: pick `i` random punctuation characters from the given charset
    return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def __a ( __lowerCamelCase, __lowerCamelCase = 8 ):
if len(__lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase_ : Union[str, Any] = any(char in ascii_uppercase for char in password )
UpperCAmelCase_ : Optional[Any] = any(char in ascii_lowercase for char in password )
UpperCAmelCase_ : List[str] = any(char in digits for char in password )
UpperCAmelCase_ : str = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def __a ( ):
UpperCAmelCase_ : Tuple = int(input("Please indicate the max length of your password: " ).strip() )
UpperCAmelCase_ : Any = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:", password_generator(__lowerCamelCase ) )
print(
"Alternative Password generated:", alternative_password_generator(__lowerCamelCase, __lowerCamelCase ), )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 366 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
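        # HEURISTIC == 1 selects Manhattan distance, anything else Euclidean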
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
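        # drop the duplicated meeting node from the backward path, then reverse it
        # so it runs from the meeting point to the goal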
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
    _a = BidirectionalAStar(init, goal)
    _a = bidir_astar.search()
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ (UpperCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE__ : List[Any] = """AutoImageProcessor"""
SCREAMING_SNAKE_CASE__ : Any = """AutoTokenizer"""
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
super().__init__(__a , __a )
UpperCAmelCase_ : List[str] = self.image_processor
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase_ : Tuple = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
UpperCAmelCase_ : Optional[int] = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
UpperCAmelCase_ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 367 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
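        # PNDM runs Runge-Kutta (PRK) warm-up steps first, then linear multistep
        # (PLMS) steps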
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
| 23 | 0 |
def __a ( __lowerCamelCase ):
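    # bead sort ("gravity sort"): on each pass the surplus beads of a taller left
    # rod fall onto its right neighbour; after len(sequence) passes the list is
    # sorted in ascending order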
if any(not isinstance(lowerCAmelCase_, lowerCAmelCase_ ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(lowerCAmelCase_ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowerCAmelCase_, sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 368 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
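        # return the replacement of the first rule whose pattern matches the key,
        # or the value unchanged if no rule matches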
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def __a ( ):
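    # (key pattern, PartitionSpec) pairs for "mp" tensor parallelism on a
    # GPT-style parameter tree; None leaves a weight unpartitioned (replicated)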
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __a ( __lowerCamelCase ):
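    # start every flattened key at the _unmatched sentinel, apply the rules, and
    # fail loudly below if any leaf is still unmatched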
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
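# A minimal usage sketch (the parameter tree is hypothetical, not from this file):
# feeding a flax params dict to the function above yields a FrozenDict that
# mirrors the pytree with a PartitionSpec (or None) at every leaf, ready for
# pjit/pmap sharding utilities.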
| 23 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (__snake_case ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LongformerTokenizer
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Any = LongformerTokenizerFast
SCREAMING_SNAKE_CASE__ : Any = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase_ : Tuple = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
UpperCAmelCase_ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ : Optional[Any] = {"unk_token": "<unk>"}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = "lower newer"
UpperCAmelCase_ : Optional[Any] = "lower newer"
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : Optional[int] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
UpperCAmelCase_ : List[str] = tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ : str = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCamelCase__ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=UpperCamelCase__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
UpperCAmelCase_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.encode(
"sequence builders" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase_ : Dict = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = "Encode this sequence."
UpperCAmelCase_ : Optional[int] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
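        # the byte-level BPE maps a leading space to the visible token prefix "Ġ"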
# Testing encoder arguments
UpperCAmelCase_ : Tuple = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
UpperCAmelCase_ : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
UpperCAmelCase_ : Optional[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
UpperCAmelCase_ : Any = "Encode <mask> sequence"
UpperCAmelCase_ : str = "Encode <mask>sequence"
UpperCAmelCase_ : Any = tokenizer.encode(UpperCamelCase__ )
UpperCAmelCase_ : str = encoded.index(UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
UpperCAmelCase_ : Optional[Any] = encoded.index(UpperCamelCase__ )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase_ : str = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase_ : int = "A, <mask> AllenNLP sentence."
UpperCAmelCase_ : Optional[Any] = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
UpperCAmelCase_ : Tuple = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase_ : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCamelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , UpperCamelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , UpperCamelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Any = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ : int = F"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : Any = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : str = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : int = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : int = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : str = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : Optional[Any] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase_ : str = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
| 369 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
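                # import the file as a module and run its doctests through
                # unittest; plain files fall back to doctest.testfile below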
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
| 23 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class A_ (lowerCamelCase_ ):
'''simple docstring'''
# warning at import time
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" ,lowerCamelCase_ ,)
| 370 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
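    # sklearn's default binary F1 over the positive class, matching how GLUE
    # reports MRPC/QQP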
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
| 23 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = DPTConfig()
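    # checkpoints with "large" in the URL use ViT-Large/16 backbone hyperparameters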
if "large" in checkpoint_url:
UpperCAmelCase_ : str = 1024
UpperCAmelCase_ : Tuple = 4096
UpperCAmelCase_ : List[Any] = 24
UpperCAmelCase_ : Tuple = 16
UpperCAmelCase_ : Union[str, Any] = [5, 11, 17, 23]
UpperCAmelCase_ : str = [256, 512, 1024, 1024]
UpperCAmelCase_ : Dict = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Any = 150
UpperCAmelCase_ : Union[str, Any] = "huggingface/label-files"
UpperCAmelCase_ : Union[str, Any] = "ade20k-id2label.json"
UpperCAmelCase_ : int = json.load(open(cached_download(hf_hub_url(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ) ), "r" ) )
UpperCAmelCase_ : Tuple = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = idalabel
UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def __a ( __lowerCamelCase ) -> Tuple:
UpperCAmelCase_ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase_ : Dict = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
UpperCAmelCase_ : Dict = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
UpperCAmelCase_ : Optional[int] = name.replace("patch_embed", "patch_embeddings" )
if "pos_embed" in name:
UpperCAmelCase_ : List[str] = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
UpperCAmelCase_ : List[str] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("proj", "projection" )
if "blocks" in name:
UpperCAmelCase_ : int = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
UpperCAmelCase_ : int = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ : Optional[int] = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name:
UpperCAmelCase_ : Tuple = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ : List[str] = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
UpperCAmelCase_ : List[str] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
UpperCAmelCase_ : Any = name.replace("scratch", "neck" )
if "layer1_rn" in name:
UpperCAmelCase_ : Optional[int] = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
UpperCAmelCase_ : Any = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
UpperCAmelCase_ : List[str] = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
UpperCAmelCase_ : List[str] = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
UpperCAmelCase_ : str = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase_ : Optional[int] = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
UpperCAmelCase_ : Optional[int] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
UpperCAmelCase_ : List[Any] = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
UpperCAmelCase_ : Tuple = name.replace("conv1", "convolution1" )
if "conv2" in name:
UpperCAmelCase_ : Optional[int] = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase_ : List[Any] = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase_ : int = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase_ : Optional[int] = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase_ : int = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase_ : Tuple = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase_ : Any = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
UpperCAmelCase_ : List[str] = name.replace("pretrained", "dpt" )
if "bn" in name:
UpperCAmelCase_ : List[str] = name.replace("bn", "batch_norm" )
if "head" in name:
UpperCAmelCase_ : Dict = name.replace("head", "head.head" )
if "encoder.norm" in name:
UpperCAmelCase_ : Optional[int] = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("auxlayer", "auxiliary_head.head" )
return name
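
# Sanity check (illustrative, not part of the original script) for the tricky
# refinenet remapping above: refinenet4 -> fusion_stage.layers.0, ...,
# refinenet1 -> fusion_stage.layers.3.
assert [abs(layer_idx - 4 ) for layer_idx in (4, 3, 2, 1)] == [0, 1, 2, 3]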
def __a ( __lowerCamelCase, __lowerCamelCase ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : int = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : List[Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase_ : Union[str, Any] = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : List[Any] = in_proj_bias[-config.hidden_size :]
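
# Illustrative sketch (not part of the original script): the slicing above assumes
# timm stores query, key and value stacked along dim 0 of a single
# (3 * hidden_size, hidden_size) matrix. The hidden size below is invented purely
# for demonstration.
def _demo_split_qkv():
    import torch

    hidden_size = 4
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32 ).reshape(
        3 * hidden_size, hidden_size )
    q = in_proj_weight[:hidden_size, :]  # first third -> query
    k = in_proj_weight[hidden_size : 2 * hidden_size, :]  # middle third -> key
    v = in_proj_weight[-hidden_size:, :]  # last third -> value
    assert torch.equal(torch.cat([q, k, v], dim=0 ), in_proj_weight )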
def __a ( ) -> Dict:
UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Any = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
UpperCAmelCase_ : Union[str, Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase, map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase_ : Optional[Any] = state_dict.pop(__lowerCamelCase )
UpperCAmelCase_ : str = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : Optional[Any] = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
UpperCAmelCase_ : str = 480 if "ade" in checkpoint_url else 384
UpperCAmelCase_ : str = DPTImageProcessor(size=__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(__lowerCamelCase, return_tensors="pt" )
# forward pass
UpperCAmelCase_ : str = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
# Assert logits
UpperCAmelCase_ : Tuple = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(__lowerCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3], __lowerCamelCase, atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3], __lowerCamelCase )
)
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print("Pushing model to hub..." )
model.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase, __lowerCamelCase ), organization="nielsr", commit_message="Add model", use_temp_dir=__lowerCamelCase, )
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase, __lowerCamelCase ), organization="nielsr", commit_message="Add image processor", use_temp_dir=__lowerCamelCase, )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
_a = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 371 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ):
"""simple docstring"""
super().__init__(
unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
        char_tokens = []
        for s in lowercase_:
            char_tokens.extend(s )
return char_tokens
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
return (vocab_file,)
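
# Standalone illustration (not part of the original file): the tokenizer above is
# character level -- text is split into single characters and each one is looked
# up in vocab.json, falling back to the unk token id.
def _demo_char_tokenize( text, vocab, unk_id=0 ):
    return [vocab.get(ch, unk_id ) for ch in text]


assert _demo_char_tokenize("ab?", {"a": 2, "b": 3} ) == [2, 3, 0]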
| 23 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input, model, tokenizer, topk=5 ):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>" ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk, dim=0 )
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
        predicted_token = predicted_token_bpe.replace("\u2581", " " )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token ), predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
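
# Toy demonstration (illustrative, not part of the original script) of the
# softmax + topk step used above, on a three-token "vocabulary":
_toy_logits = torch.tensor([2.0, 0.5, 1.0] )
_toy_values, _toy_indices = _toy_logits.softmax(dim=0 ).topk(k=2, dim=0 )
assert _toy_indices.tolist() == [0, 2]  # ids of the two most probable tokens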
tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 350 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero( x ):
    return x[0]


def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    sorted_freq_to_letter_str: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str )


def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
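
# Quick check (illustrative, not part of the original module): a message that is
# mostly "E" puts "E" first in the frequency order.
assert get_frequency_order("EEEEe" ).startswith("E" )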
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    '''simple docstring'''

    def __init__( self , value ):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum :
    '''simple docstring'''

    def __init__( self , tree ):
        """simple docstring"""
        self.tree = tree

    def depth_first_search( self , node ):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self ):
        """simple docstring"""
        yield self.depth_first_search(self.tree )
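
# Usage sketch (illustrative, not part of the original module): iterating yields
# the sum of all node values exactly once.
_demo_tree = Node(10 )
_demo_tree.left = Node(5 )
_demo_tree.right = Node(-3 )
assert list(BinaryTreeNodeSum(_demo_tree ) ) == [12]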
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase_ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23 | 0 |
"""simple docstring"""
from math import log2


def __a ( a ):
    if not isinstance(a , int ):
        raise TypeError("Input value must be a 'int' type" )
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    return 0 if (a == 0) else int(log2(a & -a ) )
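
# Quick checks (illustrative): the function returns the index of the lowest set
# bit, e.g. 36 = 0b100100 -> 2; by the convention above, 0 maps to 0.
assert __a(36 ) == 2
assert __a(8 ) == 3
assert __a(0 ) == 0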
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
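
# Minimal analog (illustrative, not part of the original file) of the lazy-import
# pattern above: attribute access triggers the real import only on first use.
import importlib
import types


class _DemoLazyModule(types.ModuleType ):
    def __init__(self, name, import_structure ):
        super().__init__(name )
        self._import_structure = import_structure

    def __getattr__(self, attr ):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule ), attr )
        raise AttributeError(attr )


_demo_lazy = _DemoLazyModule("demo", {"math": ["sqrt"]} )
assert _demo_lazy.sqrt(4.0 ) == 2.0  # "math" was only imported at this access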
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
def find_max( nums, left, right ):
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid )  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
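
# Quick checks (illustrative): the recursion above halves the index range, so it
# runs in O(n) time with O(log n) recursion depth.
assert find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7 ) == 9
assert find_max([-7], 0, 0 ) == -7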
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width( height, width, scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
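
# Quick checks (illustrative): pixel sizes are rounded up to the next multiple of
# scale_factor**2 (64 by default) before being mapped to the latent resolution.
assert downscale_height_and_width(768, 768 ) == (96, 96)
assert downscale_height_and_width(770, 770 ) == (104, 104)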
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23 | 0 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func ):
    def wrapper(*args, **kwargs ):
        starttime = timeit.default_timer()
        func(*args, **kwargs )
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
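
# Quick illustration (not part of the original benchmark): the decorator above
# returns the elapsed wall-clock seconds rather than the wrapped function's result.
@get_duration
def _demo_workload():
    return sum(range(100_000 ) )


assert isinstance(_demo_workload(), float )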
def __a ( __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ):
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = seq_shapes or {}
for i in range(__lowerCamelCase ):
UpperCAmelCase_ : List[str] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__lowerCamelCase, _ArrayXD ):
UpperCAmelCase_ : int = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__lowerCamelCase, datasets.Value ):
if v.dtype == "string":
UpperCAmelCase_ : Union[str, Any] = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase_ : Dict = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(__lowerCamelCase, datasets.Sequence ):
while isinstance(__lowerCamelCase, datasets.Sequence ):
UpperCAmelCase_ : Dict = v.feature
UpperCAmelCase_ : str = seq_shapes[k]
UpperCAmelCase_ : List[Any] = np.random.rand(*__lowerCamelCase ).astype(v.dtype )
UpperCAmelCase_ : Optional[Any] = data
dummy_data.append((i, example) )
return dummy_data
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ):
UpperCAmelCase_ : List[Any] = generate_examples(__lowerCamelCase, num_examples=__lowerCamelCase, seq_shapes=__lowerCamelCase )
with ArrowWriter(features=__lowerCamelCase, path=__lowerCamelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase_ : Optional[int] = features.encode_example(__lowerCamelCase )
writer.write(__lowerCamelCase )
UpperCAmelCase_ : Dict = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
UpperCAmelCase_ : Union[str, Any] = datasets.Dataset.from_file(filename=__lowerCamelCase, info=datasets.DatasetInfo(features=__lowerCamelCase ) )
return dataset
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """detr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = backbone_config.get("model_type" )
UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = num_queries
UpperCAmelCase_ : Union[str, Any] = d_model
UpperCAmelCase_ : str = encoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : Optional[Any] = init_xavier_std
UpperCAmelCase_ : Optional[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : int = auxiliary_loss
UpperCAmelCase_ : Optional[Any] = position_embedding_type
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Dict = dilation
# Hungarian matcher
UpperCAmelCase_ : Union[str, Any] = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : str = mask_loss_coefficient
UpperCAmelCase_ : Any = dice_loss_coefficient
UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase_ : List[str] = giou_loss_coefficient
UpperCAmelCase_ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
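
# Usage sketch (illustrative, not part of the original file): the class above
# mirrors transformers' DetrConfig. Assuming `transformers` is installed, the
# attribute_map defined above aliases num_attention_heads to
# encoder_attention_heads.
if __name__ == "__main__":
    from transformers import DetrConfig

    demo_config = DetrConfig(num_queries=50 )
    assert demo_config.num_attention_heads == demo_config.encoder_attention_heads
    print(demo_config.to_dict()["model_type"] )  # "detr"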
| 23 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_a = logging.get_logger(__name__)
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = question_encoder
UpperCAmelCase_ : int = generator
UpperCAmelCase_ : List[Any] = self.question_encoder
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if os.path.isfile(lowercase_ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
UpperCAmelCase_ : int = os.path.join(lowercase_ , "question_encoder_tokenizer" )
UpperCAmelCase_ : Any = os.path.join(lowercase_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(lowercase_ )
self.generator.save_pretrained(lowercase_ )
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ : Optional[int] = kwargs.pop("config" , lowercase_ )
if config is None:
UpperCAmelCase_ : Dict = RagConfig.from_pretrained(lowercase_ )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
lowercase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
lowercase_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=lowercase_ , generator=lowercase_ )
def __call__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.current_tokenizer(*lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.generator.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
return self.generator.decode(*lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.question_encoder
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.generator
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = "longest" , lowercase_ = None , lowercase_ = True , **lowercase_ , ):
"""simple docstring"""
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , lowercase_ , )
if max_length is None:
UpperCAmelCase_ : int = self.current_tokenizer.model_max_length
UpperCAmelCase_ : Union[str, Any] = self(
lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ : List[str] = self.current_tokenizer.model_max_length
UpperCAmelCase_ : Union[str, Any] = self(
text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : List[Any] = labels["input_ids"]
return model_inputs
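
# Behaviour sketch (illustrative, not part of the original file): the wrapper
# above dispatches __call__ to whichever sub-tokenizer is currently selected.
# A dependency-free analog:
class _DemoDualTokenizer:
    def __init__(self, question_encoder, generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder

    def __call__(self, text ):
        return self.current_tokenizer(text )


_demo = _DemoDualTokenizer(str.upper, str.lower )
assert _demo("Rag" ) == "RAG"
_demo.current_tokenizer = _demo.generator
assert _demo("Rag" ) == "rag"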
| 355 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared


# Two chains are formed:
# one ends with 89, and seeding its member 58 first minimises the number of
# iterations needed to check all the members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are seeded at the start.
# A dictionary was changed to an array to speed up the solution.
CHAINS = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution( number = 1000_0000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
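
# Quick checks (illustrative): 44 -> 32 -> 13 -> 10 -> 1, so its chain ends at 1
# (True), while 85 -> 89 ends at 89 (False).
assert next_number(44 ) == 32
assert chain(44 ) is True
assert chain(85 ) is False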
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 23 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Any = tau * frequency / samplerate
UpperCAmelCase_ : List[str] = sin(__lowerCamelCase )
UpperCAmelCase_ : Any = cos(__lowerCamelCase )
UpperCAmelCase_ : Dict = _sin / (2 * q_factor)
UpperCAmelCase_ : List[str] = (1 - _cos) / 2
UpperCAmelCase_ : Optional[Any] = 1 - _cos
UpperCAmelCase_ : Tuple = 1 + alpha
UpperCAmelCase_ : List[str] = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha
UpperCAmelCase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Optional[Any] = tau * frequency / samplerate
UpperCAmelCase_ : List[Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : Tuple = cos(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Tuple = (1 + _cos) / 2
UpperCAmelCase_ : Optional[Any] = -1 - _cos
UpperCAmelCase_ : Dict = 1 + alpha
UpperCAmelCase_ : Tuple = -2 * _cos
UpperCAmelCase_ : Union[str, Any] = 1 - alpha
UpperCAmelCase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Union[str, Any] = tau * frequency / samplerate
UpperCAmelCase_ : List[str] = sin(__lowerCamelCase )
UpperCAmelCase_ : str = cos(__lowerCamelCase )
UpperCAmelCase_ : Dict = _sin / (2 * q_factor)
UpperCAmelCase_ : str = _sin / 2
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : int = -ba
UpperCAmelCase_ : int = 1 + alpha
UpperCAmelCase_ : Tuple = -2 * _cos
UpperCAmelCase_ : str = 1 - alpha
UpperCAmelCase_ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Union[str, Any] = tau * frequency / samplerate
UpperCAmelCase_ : Any = sin(__lowerCamelCase )
UpperCAmelCase_ : int = cos(__lowerCamelCase )
UpperCAmelCase_ : Any = _sin / (2 * q_factor)
UpperCAmelCase_ : Any = 1 - alpha
UpperCAmelCase_ : Union[str, Any] = -2 * _cos
UpperCAmelCase_ : Dict = 1 + alpha
UpperCAmelCase_ : int = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : Any = tau * frequency / samplerate
UpperCAmelCase_ : Optional[Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : Any = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = _sin / (2 * q_factor)
UpperCAmelCase_ : Optional[Any] = 10 ** (gain_db / 40)
UpperCAmelCase_ : str = 1 + alpha * big_a
UpperCAmelCase_ : Dict = -2 * _cos
UpperCAmelCase_ : Optional[Any] = 1 - alpha * big_a
UpperCAmelCase_ : List[str] = 1 + alpha / big_a
UpperCAmelCase_ : List[Any] = -2 * _cos
UpperCAmelCase_ : Optional[int] = 1 - alpha / big_a
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : Optional[Any] = tau * frequency / samplerate
UpperCAmelCase_ : Optional[int] = sin(__lowerCamelCase )
UpperCAmelCase_ : Any = cos(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Optional[Any] = 10 ** (gain_db / 40)
UpperCAmelCase_ : Optional[int] = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : Tuple = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : str = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : Dict = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : List[str] = big_a * (pmc + aaa)
UpperCAmelCase_ : Any = 2 * big_a * mpc
UpperCAmelCase_ : List[str] = big_a * (pmc - aaa)
UpperCAmelCase_ : Dict = ppmc + aaa
UpperCAmelCase_ : Optional[Any] = -2 * pmpc
UpperCAmelCase_ : Union[str, Any] = ppmc - aaa
UpperCAmelCase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : Optional[int] = tau * frequency / samplerate
UpperCAmelCase_ : List[Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = cos(__lowerCamelCase )
UpperCAmelCase_ : str = _sin / (2 * q_factor)
UpperCAmelCase_ : Optional[Any] = 10 ** (gain_db / 40)
UpperCAmelCase_ : Any = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : Any = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : int = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : Dict = big_a * (ppmc + aaa)
UpperCAmelCase_ : Optional[int] = -2 * big_a * pmpc
UpperCAmelCase_ : Union[str, Any] = big_a * (ppmc - aaa)
UpperCAmelCase_ : Optional[Any] = pmc + aaa
UpperCAmelCase_ : Any = 2 * mpc
UpperCAmelCase_ : List[str] = pmc - aaa
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
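
# Standalone sketch (illustrative, not part of the original module) of what the
# order-2 IIRFilter consumed above does with [a0, a1, a2] / [b0, b1, b2]
# coefficients: a direct-form-I difference equation.
def _demo_biquad( samples, a_coeffs, b_coeffs ):
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, x0
        y2, y1 = y1, y0
        out.append(y0 )
    return out


# A unity filter leaves the signal untouched:
assert _demo_biquad([1.0, 2.0, 3.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0] ) == [1.0, 2.0, 3.0]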
| 356 |
"""simple docstring"""
def bfs( graph, s, t, parent ):
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True

    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson( graph, source, sink ):
    # This array is filled by BFS to store the augmenting path.
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink

        while s != source:
            # Find the minimum residual capacity along the chosen path.
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
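
# For the capacity matrix above (the classic CLRS flow network) the call prints
# 23. A second, minimal check (illustrative): a single edge of capacity 7 carries
# a flow of exactly 7.
_demo_graph = [[0, 7], [0, 0]]
assert ford_fulkerson(_demo_graph, 0, 1 ) == 7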
| 23 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = """llama"""
SCREAMING_SNAKE_CASE__ : List[Any] = ["""past_key_values"""]
def __init__( self , lowercase_=3_2000 , lowercase_=4096 , lowercase_=1_1008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.02 , lowercase_=1E-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = num_key_value_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Dict = rms_norm_eps
UpperCAmelCase_ : str = pretraining_tp
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
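
# Usage sketch (illustrative, not part of the original file): the class above
# mirrors transformers' LlamaConfig. Assuming `transformers` is installed, the
# rope_scaling validation can be exercised like this.
if __name__ == "__main__":
    from transformers import LlamaConfig

    LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0} )  # passes validation
    try:
        LlamaConfig(rope_scaling={"type": "linear"} )  # missing `factor`
    except ValueError as err:
        print(err )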
| 357 |
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy( preds, labels ):
    return (preds == labels).mean()
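
# Quick check (illustrative, not part of the original metric): accuracy is just
# the mean of elementwise equality, which is why numpy format is declared below.
import numpy as np

assert simple_accuracy(np.array([0, 1, 1] ), np.array([0, 1, 0] ) ) == 2 / 3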
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
| 23 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""pixel_values"""]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = True , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : List[str] = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Union[str, Any] = resample
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : Optional[Any] = do_rescale
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : Union[str, Any] = do_normalize
UpperCAmelCase_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : List[Any] = do_convert_rgb
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ : str = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : str = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : Tuple = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : List[str] = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : Dict = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : List[str] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : List[str] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Optional[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[Any] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCAmelCase_ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase_ : Tuple = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
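
# Usage sketch (illustrative, not part of the original file): the class above
# mirrors transformers' CLIPImageProcessor. Assuming `transformers` and Pillow
# are installed, a random HWC uint8 array is resized, cropped and normalised to
# the familiar (1, 3, 224, 224) CLIP input.
if __name__ == "__main__":
    import numpy as np
    from transformers import CLIPImageProcessor

    demo_processor = CLIPImageProcessor()
    demo_image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8 )
    demo_batch = demo_processor(images=demo_image, return_tensors="np" )
    print(demo_batch["pixel_values"].shape )  # (1, 3, 224, 224)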
| 358 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
                # downcast double-precision features to single precision for consistency
                if value.dtype is np.dtype(np.float64 ):
                    UpperCAmelCase_ : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
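            # e.g. with max_length=100 and pad_to_multiple_of=8 this rounds the
            # target length up to 104, the next multiple of 8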
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
| 23 | 0 |
_a = 8.31_4462 # universal gas constant, unit: J mol^-1 K^-1
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
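# Illustrative, self-contained sketch (added; the sample values are assumptions):
# both helpers above are the ideal gas law PV = nRT solved for one unknown.
# For 1 mol at 273.15 K in 0.0224 m^3 the pressure comes out near 1.01e5 Pa,
# i.e. about one atmosphere, as expected for the molar volume at STP.
def _ideal_gas_sketch() -> float:
    moles, kelvin, volume = 1.0, 273.15, 0.0224
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume  # ~101_388 Pa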
if __name__ == "__main__":
from doctest import testmod
testmod()
| 359 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 23 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = [0] * len(__lowerCamelCase )
for i in range(1, len(__lowerCamelCase ) ):
# use last results for better performance - dynamic programming
UpperCAmelCase_ : str = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCAmelCase_ : Optional[int] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCAmelCase_ : Dict = j
return prefix_result
def __a ( __lowerCamelCase ):
return max(prefix_function(__lowerCamelCase ) )
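# Self-contained sketch (added for illustration) of the same dynamic-programming
# idea used above: pi[i] is the length of the longest proper prefix of s[: i + 1]
# that is also a suffix of it.
def _prefix_function_sketch(s: str) -> list[int]:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]  # reuse the border length of the previous position
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi
# _prefix_function_sketch("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]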
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
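# Illustrative note (added): through `attribute_map` above, generic attribute
# names resolve to the CTRL-specific ones, e.g. on a default config
# `hidden_size` reads `n_embd` (1280) and `num_hidden_layers` reads `n_layer` (48).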
| 23 | 0 |
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
while number:
        # Speed is increased slightly by processing five digits per table lookup.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Every chain eventually settles into one of two cycles: one ends at 89 and the
# other ends at 1 (and contains only the single element 1).
# For the 89 cycle, seeding the cache with chain member 58 first minimizes the
# number of iterations needed to classify the remaining members.
# So 58 and 1 are seeded into the cache up front.
# A flat array replaces the original dictionary to speed up lookups.
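# Illustrative traces (added): next_number(44) -> 32 -> 13 -> 10 -> 1, so 44's
# chain ends at 1, while next_number(85) -> 89, so 85's chain ends at 89.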
_a = [None] * 10_000_000
_a = True
_a = False
def __a ( __lowerCamelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) )
UpperCAmelCase_ : List[str] = number_chain
while number < 1000_0000:
UpperCAmelCase_ : List[Any] = number_chain
number *= 10
return number_chain
def __a ( __lowerCamelCase = 1000_0000 ):
for i in range(1, __lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
    return CHAINS[:number].count(False )  # chains ending at 89 were cached as False
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"""{solution() = }""")
| 23 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
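# The first terms (added for illustration): sylvester(1..5) = 2, 3, 7, 43, 1807;
# each term equals the product of all previous terms plus one.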
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 | 0 |
import sys
import turtle
def get_mid(pa, pb):
    # midpoint of the segment joining two points
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex_a, vertex_b, vertex_c, depth):
    # draw the current triangle, then recurse on the three corner triangles
    my_pen.up()
    my_pen.goto(vertex_a[0], vertex_a[1])
    my_pen.down()
    my_pen.goto(vertex_b[0], vertex_b[1])
    my_pen.goto(vertex_c[0], vertex_c[1])
    my_pen.goto(vertex_a[0], vertex_a[1])
    if depth == 0:
        return
    triangle(vertex_a, get_mid(vertex_a, vertex_b), get_mid(vertex_a, vertex_c), depth - 1)
    triangle(vertex_b, get_mid(vertex_a, vertex_b), get_mid(vertex_b, vertex_c), depth - 1)
    triangle(vertex_c, get_mid(vertex_c, vertex_b), get_mid(vertex_c, vertex_a), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
_a = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
_a = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 362 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
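# Illustrative values (added): a perfect number equals the sum of its proper
# divisors, so 6 (1 + 2 + 3), 28, 496 and 8128 return True, while 12 returns
# False since 1 + 2 + 3 + 4 + 6 = 16 != 12.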
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 363 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 23 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 364 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
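# Minimal self-contained sketch (added for illustration; `hidden_size` and the
# random weights are assumptions): the slicing above splits timm's fused qkv
# projection of shape (3 * hidden_size, hidden_size) into query/key/value blocks.
def _qkv_split_sketch(hidden_size: int = 4):
    qkv = torch.randn(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value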
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 23 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'sentencepiece.model'}
_a = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_a = {
'google/rembert': 256,
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_="[CLS]" , lowercase_="[SEP]" , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , **lowercase_ , ):
"""simple docstring"""
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : int = do_lower_case
UpperCAmelCase_ : Tuple = remove_space
UpperCAmelCase_ : str = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : Any = spm.SentencePieceProcessor()
self.sp_model.Load(lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Union[str, Any] = None
return state
def __setstate__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = d
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , lowercase_ , lowercase_=False ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.sp_model.EncodeAsPieces(lowercase_ )
return pieces
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.sp_model.PieceToId(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.sp_model.decode_pieces(lowercase_ )
return out_string
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
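    # Illustrative (added): for a sequence pair the mask is
    # [1] + [0] * len(a) + [1] + [0] * len(b) + [1], i.e. special tokens are
    # marked 1 and content tokens 0.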
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 365 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
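        # `replicate` copies the pipeline params to every local device, while
        # `shard` adds a leading device axis to the inputs so the jitted
        # (pmapped) call below runs data-parallel across devices.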
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 23 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ : Optional[int] = parent.find_all(child.name , recursive=lowercase_ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(lowercase_ ) else next(i for i, s in enumerate(lowercase_ , 1 ) if s is child ) )
UpperCAmelCase_ : Union[str, Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BeautifulSoup(lowercase_ , "html.parser" )
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = []
for element in html_code.descendants:
if type(lowercase_ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ : List[str] = html.unescape(lowercase_ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowercase_ )
UpperCAmelCase_ : List[Any] = self.xpath_soup(lowercase_ )
stringaxtag_seq.append(lowercase_ )
stringaxsubs_seq.append(lowercase_ )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = ""
for tagname, subs in zip(lowercase_ , lowercase_ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
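    # Illustrative example (added): construct_xpath(["html", "body", "div"], [0, 0, 2])
    # returns "/html/body/div[2]"; a subscript of 0 suppresses the "[n]" index.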
def __call__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = False
# Check that strings has a valid type
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[str] = True
elif isinstance(lowercase_ , (list, tuple) ):
if len(lowercase_ ) == 0 or isinstance(html_strings[0] , lowercase_ ):
UpperCAmelCase_ : List[str] = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(lowercase_ )}.""" )
UpperCAmelCase_ : str = bool(isinstance(lowercase_ , (list, tuple) ) and (isinstance(html_strings[0] , lowercase_ )) )
if not is_batched:
UpperCAmelCase_ : Tuple = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ : int = []
UpperCAmelCase_ : List[Any] = []
for html_string in html_strings:
UpperCAmelCase_ : Tuple = self.get_three_from_single(lowercase_ )
nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = []
for node, tag_list, sub_list in zip(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase_ : Any = self.construct_xpath(lowercase_ , lowercase_ )
xpath_strings.append(lowercase_ )
xpaths.append(lowercase_ )
# return as Dict
UpperCAmelCase_ : Optional[int] = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ : Union[str, Any] = BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
return encoded_inputs
| 366 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for Manhattan distance, 0 for Euclidean distance
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
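    # Illustrative values (added): for dx = 3 and dy = 4 the Manhattan heuristic
    # above returns 7, while the Euclidean one returns 5.0.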
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
    _a = BidirectionalAStar(init, goal)
    _a = bidir_astar.search()  # run the bidirectional search so the timing below measures it (identifier assumed)
    _a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_a = logging.get_logger('transformers.models.speecht5')
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
hf_model.apply_weight_norm()
UpperCAmelCase_ : Any = checkpoint["input_conv.weight_g"]
UpperCAmelCase_ : Any = checkpoint["input_conv.weight_v"]
UpperCAmelCase_ : List[str] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase_ : int = checkpoint[f"""upsamples.{i}.1.weight_g"""]
UpperCAmelCase_ : Dict = checkpoint[f"""upsamples.{i}.1.weight_v"""]
UpperCAmelCase_ : Optional[int] = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase_ : Dict = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
UpperCAmelCase_ : str = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
UpperCAmelCase_ : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
UpperCAmelCase_ : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
UpperCAmelCase_ : List[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
UpperCAmelCase_ : int = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
UpperCAmelCase_ : Union[str, Any] = checkpoint["output_conv.1.weight_g"]
UpperCAmelCase_ : str = checkpoint["output_conv.1.weight_v"]
UpperCAmelCase_ : List[str] = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, ):
if config_path is not None:
UpperCAmelCase_ : Any = SpeechTaHifiGanConfig.from_pretrained(__lowerCamelCase )
else:
UpperCAmelCase_ : Optional[Any] = SpeechTaHifiGanConfig()
UpperCAmelCase_ : List[str] = SpeechTaHifiGan(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = torch.load(__lowerCamelCase )
load_weights(orig_checkpoint["model"]["generator"], __lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = np.load(__lowerCamelCase )
UpperCAmelCase_ : Dict = stats[0].reshape(-1 )
UpperCAmelCase_ : Optional[int] = stats[1].reshape(-1 )
UpperCAmelCase_ : Tuple = torch.from_numpy(__lowerCamelCase ).float()
UpperCAmelCase_ : Dict = torch.from_numpy(__lowerCamelCase ).float()
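    # The stats file is assumed to hold two rows -- the feature-wise mean and
    # scale of the log-mel inputs -- which become the vocoder's normalization
    # buffers before saving.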
model.save_pretrained(__lowerCamelCase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_a = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 367 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
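# A minimal standalone sketch of the PNDM sampling loop that the assertions above
# exercise; it is not part of the test suite, and `model(sample, t)` stands in for
# any noise-predicting network with a matching signature.
#
#   from diffusers import PNDMScheduler
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear", skip_prk_steps=True)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample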
| 23 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call).")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
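# A short usage sketch for the processor above; the checkpoint name is illustrative,
# and any repository shipping both an M-CTC-T feature extractor and a tokenizer works
# the same way.
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=raw_waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="a transcription").input_ids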
| 368 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
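# A sketch of applying the rules above to a toy parameter tree; the shapes are made
# up, but the nesting mirrors the GPT-style layout the regexes expect.
#
#   import jax.numpy as jnp
#
#   params = {"transformer": {
#       "wte": {"embedding": jnp.zeros((50257, 768))},
#       "ln_f": {"bias": jnp.zeros((768,)), "scale": jnp.ones((768,))},
#   }}
#   spec = set_partitions(params)  # PyTree of PartitionSpec / None leaves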
| 23 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter to assist with logging in multiprocess; `log` takes the extra kwargs
    `main_process_only` (log only on the main process) and `in_order` (log on every
    process, one rank at a time)."""
    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        """Delegates the logger call after checking if we should log."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    """Returns a stdlib logger for `name`, wrapped in a `MultiProcessAdapter`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
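# Typical use inside an `accelerate` launch; `main_process_only` and `in_order` are
# consumed by `log` above before the call reaches the underlying stdlib logger.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()  # initializes PartialState
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")
#   logger.info("printed on every rank, one at a time", main_process_only=False, in_order=True)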
| 369 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs through `directory`, collecting the files selected by `identifier` and executing their doctests."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_doc(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_doc(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)
    def test_configuration_doc(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)
    def test_remaining_doc_files(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)
    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 23 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
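# Example invocation (hypothetical checkpoint file) converting a v1.x checkpoint and
# saving the pipeline in safetensors format:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema \
#       --to_safetensors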
| 370 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
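# A tiny, runnable example of the GLUE entry point above (toy arrays, not a
# meaningful evaluation):
#
#   import numpy as np
#
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}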
| 23 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_a = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download)
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs)
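# A short usage sketch (hypothetical local path; the directory just needs to contain
# a `model.onnx` file). `__call__` returns one numpy array per graph output:
#
#   model = OnnxRuntimeModel.from_pretrained("./my_onnx_model", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))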
| 371 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR; loads its vocabulary from a JSON file."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
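# Character-level round trip with the tokenizer above (hypothetical vocab path; the
# vocab JSON simply maps single characters to ids):
#
#   tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#   ids = tokenizer("abc")["input_ids"]
#   chars = tokenizer.convert_ids_to_tokens(ids)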
| 23 | 0 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_a = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights", action="store_true", help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ), )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"""using quantization package {pytorch_quantization.__file__}""")
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"""{name:80}: {module}""")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V use a single fused GEMM."""
    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""")
    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"""FUSE_QKV: {name:{name_width}}""")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"""Act:{input_q.extra_repr()}"""
        wgt_str = f"""Wgt:{weight_q.extra_repr()}"""
        s = f"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"""{name:{name_width}} {act_str}""")
            logger.info(f"""{" ":{name_width}} {wgt_str}""")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"""{name:80} {mod}""")
            count += 1
    print(f"""{count} TensorQuantizers found in model""")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod.quantizer to v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"""{name} has no {quantizer}""")
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase="both", **__lowerCamelCase ):
UpperCAmelCase_ : List[Any] = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(__lowerCamelCase, __lowerCamelCase, "_input_quantizer", __lowerCamelCase, __lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(__lowerCamelCase, __lowerCamelCase, "_weight_quantizer", __lowerCamelCase, __lowerCamelCase )
logger.info(__lowerCamelCase )
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += f""" {k}={v}"""
                        setattr(mod, k, v)
                    logger.info(s)
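# The intended calibration workflow for the helpers above, sketched end to end;
# `model` is assumed to be a network whose layers use pytorch_quantization's quant
# modules (e.g. QDQBERT) and `args` comes from the parser set up in add_arguments:
#
#   set_default_quantizers(args)       # before the model is created
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   # ... run a few forward passes on calibration data ...
#   finish_calibration(model, args)
#   configure_model(model, args, eval=True)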
| 350 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
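# Example: a higher match score suggests the text has ordinary English letter
# frequencies, which is useful for ranking candidate keys when breaking a
# substitution cipher:
#
#   get_frequency_order("Hello World")       # frequency-ranked alphabet string
#   english_freq_match_score("Hello World")  # integer between 0 and 12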
| 23 | 0 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
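# Quick sanity check of the formula above; speed enters squared, so the sign of the
# velocity does not matter:
#
#   assert kinetic_energy(10, 10) == 500.0
#   assert kinetic_energy(10, -10) == 500.0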
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class A_ (lowercase__ ):
'''simple docstring'''
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 23 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = AltDiffusionPipeline
SCREAMING_SNAKE_CASE__ : str = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCAmelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCAmelCase_ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
UpperCAmelCase_ : Tuple = CLIPTextModel(lowercase_ )
UpperCAmelCase_ : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCAmelCase_ : Tuple = 77
UpperCAmelCase_ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : Optional[Any] = RobertaSeriesModelWithTransformation(lowercase_ )
UpperCAmelCase_ : int = text_encoder
UpperCAmelCase_ : Optional[Any] = AltDiffusionPipeline(**lowercase_ )
UpperCAmelCase_ : Dict = alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : Dict = "A photo of an astronaut"
UpperCAmelCase_ : List[Any] = alt_pipe(**lowercase_ )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : str = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : Dict = RobertaSeriesModelWithTransformation(lowercase_ )
UpperCAmelCase_ : Dict = text_encoder
UpperCAmelCase_ : Optional[int] = AltDiffusionPipeline(**lowercase_ )
UpperCAmelCase_ : Any = alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : int = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : Optional[Any] = alt_pipe(**lowercase_ )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion( self ):
        """simple docstring"""
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )  # torch_device is provided by diffusers' testing utilities
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim( self ):
        """simple docstring"""
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="numpy" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
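# Illustrative end-to-end usage (added; not part of the original test file).
# The checkpoint and call arguments mirror the slow tests above; treat the
# exact argument set as an assumption based on the diffusers pipeline API.
if __name__ == "__main__":
    pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    generator = torch.manual_seed(0)
    image = pipe(
        "A painting of a squirrel eating a burger",
        generator=generator,
        guidance_scale=6.0,
        num_inference_steps=20,
    ).images[0]
    image.save("squirrel.png")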
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
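# Minimal usage sketch (added): the lazy structure above means the heavy
# torch-backed classes are only imported when first accessed. Assumes a
# standard `transformers` install; the config defaults are illustrative.
if __name__ == "__main__":
    from transformers import UniSpeechConfig, UniSpeechModel

    config = UniSpeechConfig()        # accessing the class resolves the lazy module
    model = UniSpeechModel(config)    # randomly initialised, no checkpoint download
    print(model.config.model_type)    # "unispeech"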
| 23 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
    def test_stable_diffusion_flax( self ):
        """simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1E-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49947.875 ) < 5E-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=None )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2383808.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=False , steps_offset=1 , )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2347693.5) ) < 5E-1
    def test_jax_memory_efficient_attention( self ):
        """simple docstring"""
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompts = num_samples * [prompt]
        rng = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompts )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , rng , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_base = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompts )
        prompt_ids = shard(prompt_ids )
        images_eff = pipeline(prompt_ids , params , rng , jit=True ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_base ).max() < 1E-2
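# Illustrative sketch (added) of the replicate/shard/jit data-parallel pattern
# the slow tests above rely on. The tuple return from `from_pretrained` and the
# positional call signature are assumptions based on the Flax pipeline API used
# in this file; running it requires a JAX install with at least one device.
if __name__ == "__main__":
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
    )
    num_devices = jax.device_count()
    prompt_ids = pipeline.prepare_inputs(["a photo of a cat"] * num_devices)
    p_params = replicate(params)                                # one parameter copy per device
    rng = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one RNG stream per device
    prompt_ids = shard(prompt_ids)                              # leading axis -> devices
    images = pipeline(prompt_ids, p_params, rng, jit=True).images
    print(images.shape)  # (num_devices, batch_per_device, 512, 512, 3)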
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
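# Added worked example (illustrative) for the helper above: with the default
# scale_factor of 8, requested sizes are rounded up to whole 64-pixel blocks
# before being mapped to the latent resolution.
if __name__ == "__main__":
    assert downscale_height_and_width(512, 512) == (64, 64)
    assert downscale_height_and_width(520, 512) == (72, 64)  # 520 rounds up to 9 blocks -> 72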
class KandinskyV22Pipeline(DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=False )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 23 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token" )
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection" )
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm" )
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn", "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense" )
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model ):
    # target keys follow the HF Swin naming (reconstructed from the standard conversion script)
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""] = val[
                    :dim
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
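# Self-contained demo (added for illustration) of the qkv split above: a fused
# (3*dim, dim) projection matrix is cut into equal query/key/value thirds along
# dimension 0, exactly like `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]`.
if __name__ == "__main__":
    demo_dim = 4
    fused_qkv = torch.arange(3 * demo_dim * demo_dim, dtype=torch.float32).reshape(3 * demo_dim, demo_dim)
    q, k, v = fused_qkv[:demo_dim, :], fused_qkv[demo_dim : demo_dim * 2, :], fused_qkv[-demo_dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused_qkv)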
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub ):
    state_dict = torch.load(checkpoint_path, map_location="cpu" )["model"]
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model )
    model.load_state_dict(new_state_dict )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192} )
    image = Image.open(requests.get(url, stream=True ).raw )
    inputs = image_processor(images=image, return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
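# Example invocation (added; the script filename and checkpoint path are
# placeholders / assumptions):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim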
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        """simple docstring"""
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        # absolute tolerance used when validating the ONNX export
        return 1E-5
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        # minimum ONNX opset version for the export
        return 12
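# Usage sketch (added): the `attribute_map` above lets generic code read
# `hidden_size`/`num_attention_heads` while DETR stores them as
# `d_model`/`encoder_attention_heads`. Default values are taken from the
# signature above.
if __name__ == "__main__":
    config = DetrConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8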
| 23 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput ):
    '''simple docstring'''
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 355 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]
def next_number(number ):
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True   # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number = 10_000_000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    # count the chains that terminate in 89
    return CHAINS[:number].count(False )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"""{solution() = }""")
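# Added illustration: walking a chain explicitly with next_number. Every
# starting value eventually reaches the 1-cycle or the 89-cycle.
if __name__ == "__main__":
    n = 44
    seen = [n]
    while n not in (1, 89):
        n = next_number(n)
        seen.append(n)
    print(seen)  # [44, 32, 13, 10, 1]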
| 23 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    '''simple docstring'''
    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection( self , pipeline , examples ):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset(datasets.BeamBasedBuilder ):
    '''simple docstring'''
    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        """simple docstring"""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection( self , pipeline , examples ):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest(TestCase ):
'''simple docstring'''
    @require_beam
    def test_download_and_prepare( self ):
        """simple docstring"""
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_download_and_prepare_sharded( self ):
        """simple docstring"""
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_no_beam_options( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def test_nested_features( self ):
        """simple docstring"""
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 356 |
"""simple docstring"""
def bfs(graph, source, sink, parent ):
    # Breadth-first search: returns True if there is an augmenting path from source to sink.
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink ):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))  # 23 for this classic example network
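# A second, hand-checkable instance (added): the max flow equals the capacity
# of the cut around the source, 3 + 2 = 5. Note that ford_fulkerson mutates
# its graph argument, so each run needs a fresh capacity matrix.
small_graph = [
    [0, 3, 2, 0],
    [0, 0, 5, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(small_graph, 0, 3) == 5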
| 23 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
_a = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
_a = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
SAMPLE_TEXT = ' Hello world! cécé herlolip'
mnli_rename_keys = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path, map_location="cpu" )
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT, return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens, tokens2 ).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}""" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli", tokens, return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model, "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
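# Example invocations (added; the script filename is an assumption, the
# positional arguments match the parser defined above):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn ./bart-large-cnn \
#       --hf_config facebook/bart-large-cnn
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart_xsum/model.pt ./bart-large-xsum \
#       --hf_config facebook/bart-large-xsum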
| 357 |
"""simple docstring"""
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n  author = "Conneau, Alexis\n  and Rinott, Ruty\n  and Lample, Guillaume\n  and Williams, Adina\n  and Bowman, Samuel R.\n  and Schwenk, Holger\n  and Stoyanov, Veselin",\n  title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n  booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n  in Natural Language Processing",\n  year = "2018",\n  publisher = "Association for Computational Linguistics",\n  location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Xnli(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
| 23 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin ):
'''simple docstring'''
    def __init__( self , feature_size , sampling_rate , padding_value , **kwargs ):
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad( self , processed_features , padding = True , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , ):
        """simple docstring"""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                F""" to this method that includes {self.model_input_names[0]}, but you provided"""
                F""" {list(processed_features.keys() )}""" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(first_element )}. """
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        """simple docstring"""
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
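# A minimal sketch (not part of the original snippet): how `pad_to_multiple_of`
# rounds `max_length` up in the `_pad`/`_truncate` helpers above. The helper name
# `_rounded_max_length` is hypothetical, introduced only for this illustration.
def _rounded_max_length(max_length, pad_to_multiple_of):
    if max_length % pad_to_multiple_of != 0:
        return ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    return max_length

assert _rounded_max_length(100, 8) == 104  # rounded up to the next multiple of 8
assert _rounded_max_length(96, 8) == 96  # already a multiple, left unchanged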
| 23 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A_ (lowercase__ ,lowercase__ ,lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = False , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : int = nn.Embedding(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = nn.Embedding(lowercase_ , lowercase_ )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Tuple = nn.Dropout(p=lowercase_ )
UpperCAmelCase_ : Optional[int] = TaConfig(
vocab_size=lowercase_ , d_model=lowercase_ , num_heads=lowercase_ , d_kv=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , feed_forward_proj=lowercase_ , is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , )
UpperCAmelCase_ : int = nn.ModuleList()
for lyr_num in range(lowercase_ ):
UpperCAmelCase_ : int = TaBlock(lowercase_ )
self.encoders.append(lowercase_ )
UpperCAmelCase_ : Optional[int] = TaLayerNorm(lowercase_ )
UpperCAmelCase_ : Optional[int] = nn.Dropout(p=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.token_embedder(lowercase_ )
UpperCAmelCase_ : List[Any] = encoder_input_tokens.shape[1]
UpperCAmelCase_ : str = torch.arange(lowercase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowercase_ )
UpperCAmelCase_ : Optional[Any] = self.dropout_pre(lowercase_ )
# inverted the attention mask
UpperCAmelCase_ : str = encoder_input_tokens.size()
UpperCAmelCase_ : Optional[int] = self.get_extended_attention_mask(lowercase_ , lowercase_ )
for lyr in self.encoders:
UpperCAmelCase_ : Any = lyr(lowercase_ , lowercase_ )[0]
UpperCAmelCase_ : List[str] = self.layer_norm(lowercase_ )
return self.dropout_post(lowercase_ ), encoder_inputs_mask
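# A minimal sketch (not part of the original snippet) of driving the note encoder
# above. The positional order (max_length, vocab_size, d_model, dropout_rate,
# num_layers, num_heads, d_kv, d_ff, feed_forward_proj) is an assumption read off
# the __init__ body, and all hyperparameter values are made up for the example.
_encoder = A_(256, 1536, 64, 0.1, 2, 2, 32, 128, "gated-gelu")
_tokens = torch.randint(0, 1536, (1, 256))
_mask = torch.ones((1, 256), dtype=torch.long)
# the forward method is named UpperCamelCase__ in this snippet
_hidden, _mask_out = _encoder.UpperCamelCase__(_tokens, _mask)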
| 359 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
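# A minimal sketch (not part of the original snippet): the same pickling round trip
# the test above exercises, outside unittest. `Accelerator.prepare` is expected to
# wrap the optimizer, and the wrapper must survive pickle.dumps/pickle.loads.
if __name__ == "__main__":
    _model = torch.nn.Linear(10, 10)
    _optimizer = Accelerator().prepare(torch.optim.SGD(_model.parameters(), 0.1))
    pickle.loads(pickle.dumps(_optimizer))  # should complete without raising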
| 23 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
UpperCAmelCase_ : Dict = 1024
UpperCAmelCase_ : List[str] = 4096
UpperCAmelCase_ : Union[str, Any] = 24
UpperCAmelCase_ : str = 16
UpperCAmelCase_ : Tuple = [5, 11, 17, 23]
UpperCAmelCase_ : int = [256, 512, 1024, 1024]
UpperCAmelCase_ : Any = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = 768
UpperCAmelCase_ : List[str] = [1, 1, 1, 0.5]
UpperCAmelCase_ : str = [256, 512, 768, 768]
UpperCAmelCase_ : str = 150
UpperCAmelCase_ : List[Any] = 16
UpperCAmelCase_ : Optional[Any] = (1, 384, 384)
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = "project"
if "ade" in checkpoint_url:
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Dict = 768
UpperCAmelCase_ : Any = [1, 1, 1, 0.5]
UpperCAmelCase_ : Dict = 150
UpperCAmelCase_ : str = 16
UpperCAmelCase_ : Optional[int] = "huggingface/label-files"
UpperCAmelCase_ : Dict = "ade20k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ) ), "r" ) )
UpperCAmelCase_ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Dict = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase_ : int = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
UpperCAmelCase_ : int = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
UpperCAmelCase_ : List[Any] = name.replace("patch_embed", "" )
if "pos_embed" in name:
UpperCAmelCase_ : int = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
UpperCAmelCase_ : Tuple = name.replace("proj", "projection" )
if "blocks" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
UpperCAmelCase_ : Any = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ : Optional[int] = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("norm1", "layernorm_before" )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase_ : Tuple = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
UpperCAmelCase_ : List[Any] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
UpperCAmelCase_ : int = name.replace("scratch", "neck" )
if "layer1_rn" in name:
UpperCAmelCase_ : Dict = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
UpperCAmelCase_ : Dict = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
UpperCAmelCase_ : List[Any] = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
UpperCAmelCase_ : Tuple = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
UpperCAmelCase_ : Optional[Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase_ : List[Any] = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase_ : List[str] = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
UpperCAmelCase_ : Optional[int] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
UpperCAmelCase_ : int = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("conv1", "convolution1" )
if "conv2" in name:
UpperCAmelCase_ : int = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase_ : List[Any] = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase_ : List[Any] = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase_ : Any = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase_ : str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase_ : Any = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase_ : Dict = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
UpperCAmelCase_ : Any = name.replace("pretrained", "dpt" )
if "bn" in name:
UpperCAmelCase_ : List[str] = name.replace("bn", "batch_norm" )
if "head" in name:
UpperCAmelCase_ : List[str] = name.replace("head", "head.head" )
if "encoder.norm" in name:
UpperCAmelCase_ : int = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
UpperCAmelCase_ : Any = name.replace("auxlayer", "auxiliary_head.head" )
if "backbone" in name:
UpperCAmelCase_ : List[Any] = name.replace("backbone", "backbone.bit.encoder" )
if ".." in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("..", "." )
if "stem.conv" in name:
UpperCAmelCase_ : str = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ : Dict = name.replace("blocks", "layers" )
if "convolution" in name and "backbone" in name:
UpperCAmelCase_ : str = name.replace("convolution", "conv" )
if "layer" in name and "backbone" in name:
UpperCAmelCase_ : Dict = name.replace("layer", "layers" )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase_ : List[str] = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
if "embedder.conv" in name:
UpperCAmelCase_ : str = name.replace("embedder.conv", "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
return name
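# A minimal sketch (not part of the original snippet): the refinenet index remap used
# above. The original blocks are numbered 1..4 while `fusion_stage.layers` runs 0..3
# in the opposite direction, hence the abs(layer_idx - 4) in the rename logic.
for _layer_idx in (1, 2, 3, 4):
    print(f"refinenet{_layer_idx} -> fusion_stage.layers.{abs(_layer_idx - 4)}")
# refinenet1 -> fusion_stage.layers.3 ... refinenet4 -> fusion_stage.layers.0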
def __a ( __lowerCamelCase, __lowerCamelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : List[Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase_ : Optional[Any] = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : List[Any] = in_proj_bias[-config.hidden_size :]
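# A minimal sketch (not part of the original snippet): the fused-QKV split performed
# by the helper above, on a toy matrix. The checkpoint stores query/key/value as one
# (3 * hidden, hidden) matrix; the converted model wants three (hidden, hidden) slices.
import numpy as _np  # local import, used only by this illustration

_hidden = 4
_in_proj_weight = _np.arange(3 * _hidden * _hidden).reshape(3 * _hidden, _hidden)
_q = _in_proj_weight[:_hidden, :]
_k = _in_proj_weight[_hidden : 2 * _hidden, :]
_v = _in_proj_weight[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)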
def __a ( ):
UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase_ : Union[str, Any] = torch.load(__lowerCamelCase, map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase_ : Optional[Any] = state_dict.pop(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
UpperCAmelCase_ : List[Any] = 480 if "ade" in checkpoint_url else 384
UpperCAmelCase_ : List[str] = DPTImageProcessor(size=__lowerCamelCase )
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : int = image_processor(__lowerCamelCase, return_tensors="pt" )
# forward pass
UpperCAmelCase_ : int = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
if show_prediction:
UpperCAmelCase_ : Any = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=__lowerCamelCase, )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
_a = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
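# Illustrative usage (not part of the original snippet): a hypothetical invocation of
# the conversion script above. The script filename and the local checkpoint path are
# made up for the example; the flags match the argparse definitions above.
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url /path/to/dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --model_name dpt-hybrid-midas \
#       --show_prediction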
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
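# A minimal sketch (not part of the original snippet): instantiating this config with
# non-default sizes and reading values back through the attribute map above, so that
# `hidden_size` resolves to `n_embd` and `num_hidden_layers` to `n_layer`. It assumes
# the class is exported by transformers under its public name, CTRLConfig.
from transformers import CTRLConfig

_small = CTRLConfig(vocab_size=1000, n_embd=64, n_layer=2, n_head=4)
assert _small.hidden_size == 64
assert _small.num_hidden_layers == 2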
| 23 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def __a ( __lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
UpperCAmelCase_ : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text, "html.parser" )
UpperCAmelCase_ : Optional[Any] = soup.findAll("h1" )
UpperCAmelCase_ : Union[str, Any] = soup.findAll("div", {"class": "maincounter-number"} )
keys += soup.findAll("span", {"class": "panel-title"} )
values += soup.findAll("div", {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCamelCase, __lowerCamelCase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""") | 361 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = BlenderbotConfig
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu"""
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , ):
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : int = eos_token_id
UpperCAmelCase_ : Optional[int] = pad_token_id
UpperCAmelCase_ : Optional[int] = bos_token_id
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : Tuple = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TFBlenderbotModel(config=lowercase_ ).get_decoder()
UpperCAmelCase_ : Optional[Any] = inputs_dict["input_ids"]
UpperCAmelCase_ : int = input_ids[:1, :]
UpperCAmelCase_ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : Optional[Any] = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ )
UpperCAmelCase_ : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase_ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : Dict = model(lowercase_ , attention_mask=lowercase_ )[0]
UpperCAmelCase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : Tuple = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
        UpperCAmelCase_ : int = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.int8 )
if decoder_attention_mask is None:
UpperCAmelCase_ : int = tf.concat(
[
            tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
            tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
], axis=-1, )
if head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
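# A minimal sketch (not part of the original snippet): how the helper above derives an
# attention mask from pad tokens. With pad_token_id = 0, every non-pad position maps to 1.
if is_tf_available():
    _ids = tf.constant([[5, 6, 0], [7, 0, 0]])
    _mask = tf.cast(tf.math.not_equal(_ids, 0), tf.int8)
    # _mask -> [[1, 1, 0], [1, 0, 0]]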
@require_tf
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : int = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : Dict = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = TFBlenderbotModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""My friends are cool but they eat too many carbs."""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.tokenizer(self.src_text , return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase_ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 362 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = 'http://www.mocksite.com/file1.txt'
_a = '"text": ["foo", "foo"]'
_a = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 200
SCREAMING_SNAKE_CASE__ : Dict = {"""Content-Length""": """100"""}
SCREAMING_SNAKE_CASE__ : Dict = {}
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return [bytes(lowercase_ , "utf-8" )]
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
import requests
monkeypatch.setattr(__lowerCamelCase, "request", __lowerCamelCase )
UpperCAmelCase_ : Tuple = URL
if issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = url
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = [url]
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = {"train": url}
UpperCAmelCase_ : Union[str, Any] = "dummy"
UpperCAmelCase_ : Optional[Any] = "downloads"
UpperCAmelCase_ : Optional[int] = tmp_path
UpperCAmelCase_ : List[str] = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase, __lowerCamelCase ), use_etag=__lowerCamelCase, )
UpperCAmelCase_ : List[Any] = DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase )
UpperCAmelCase_ : List[Any] = dl_manager.download(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = [downloaded_paths]
UpperCAmelCase_ : int = [urls]
elif isinstance(__lowerCamelCase, __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
UpperCAmelCase_ : Tuple = downloaded_paths.values()
UpperCAmelCase_ : str = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase, __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCAmelCase_ : List[str] = Path(__lowerCamelCase )
UpperCAmelCase_ : List[str] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCAmelCase_ : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
UpperCAmelCase_ : str = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
UpperCAmelCase_ : Dict = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = str(__lowerCamelCase )
if issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = filename
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = [filename]
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = {"train": filename}
UpperCAmelCase_ : Optional[int] = "dummy"
UpperCAmelCase_ : List[Any] = xz_file.parent
UpperCAmelCase_ : List[Any] = "extracted"
UpperCAmelCase_ : Tuple = DownloadConfig(
cache_dir=__lowerCamelCase, use_etag=__lowerCamelCase, )
UpperCAmelCase_ : Any = DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase )
UpperCAmelCase_ : List[str] = dl_manager.extract(__lowerCamelCase )
UpperCAmelCase_ : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = [extracted_paths]
UpperCAmelCase_ : str = [paths]
elif isinstance(__lowerCamelCase, __lowerCamelCase ):
assert "train" in extracted_paths.keys()
UpperCAmelCase_ : str = extracted_paths.values()
UpperCAmelCase_ : Tuple = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase, __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCAmelCase_ : Union[str, Any] = Path(__lowerCamelCase )
UpperCAmelCase_ : List[str] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase, etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCAmelCase_ : List[str] = extracted_path.read_text()
UpperCAmelCase_ : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def __a ( __lowerCamelCase, __lowerCamelCase ):
assert path.endswith(".jsonl" )
for num_items, line in enumerate(__lowerCamelCase, start=1 ):
UpperCAmelCase_ : Any = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"] )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = request.getfixturevalue(__lowerCamelCase )
UpperCAmelCase_ : Dict = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
_test_jsonl(__lowerCamelCase, __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = request.getfixturevalue(__lowerCamelCase )
UpperCAmelCase_ : int = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
_test_jsonl(__lowerCamelCase, __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ), start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
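# A minimal sketch (not part of the original snippet): the cache layout the tests above
# assert. Downloads are expected to land under <cache_dir>/<cache_subdir>/<hash>, where
# the hash comes from hash_url_to_filename(url, etag), next to a "<hash>.json" metadata
# file recording the url and etag. Shown as comments because it would hit the network.
#
#   dl_manager = DownloadManager(
#       dataset_name="dummy",
#       download_config=DownloadConfig(cache_dir="./cache/downloads", use_etag=False),
#   )
#   path = dl_manager.download("http://www.mocksite.com/file1.txt")
#   print(path, Path(path).with_suffix(".json").read_text())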
| 363 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 23 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def __a ( __lowerCamelCase ):
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def __a ( __lowerCamelCase ):
return (gray > 127) & (gray <= 255)
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = np.zeros_like(__lowerCamelCase )
UpperCAmelCase_ : Any = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase_ : Any = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase_ : Union[str, Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase_ : Tuple = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_a = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_a = np.array(Image.open(lena_path))
# kernel to be applied
_a = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_a = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_a = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
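# A minimal sketch (not part of the original snippet): the intended behavior of the
# dilation routine above on a tiny binary image. A single foreground pixel should grow
# into the footprint of the cross-shaped structuring element.
_img = np.zeros((5, 5))
_img[2, 2] = 1
_out = dilation(_img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
# _out[1:4, 1:4] should equal the cross pattern [[0, 1, 0], [1, 1, 1], [0, 1, 0]]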
| 364 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
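# A minimal sketch (not part of the original snippet): the resize arithmetic used above.
# The image is first resized by the DeiT train/test ratio 256/224 and then center-cropped
# back to the target size, so the ratio is preserved for every input resolution.
for _image_size in (224, 384):
    print(_image_size, "->", int((256 / 224) * _image_size))
# 224 -> 256 and 384 -> 438 before the crop back to the configured image size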
| 23 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = a.name
UpperCAmelCase_ : Any = b.name
UpperCAmelCase_ : List[str] = ""
UpperCAmelCase_ : Dict = ""
UpperCAmelCase_ : List[Any] = a == b
UpperCAmelCase_ : List[Any] = name_a
UpperCAmelCase_ : Any = name_b
return res
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__lowerCamelCase, __lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g, __lowerCamelCase, __lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g, __lowerCamelCase, __lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g, __lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = list(model.graph.initializer )
UpperCAmelCase_ : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase_ : List[str] = inits[i].name
UpperCAmelCase_ : Union[str, Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph, __lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : str = os.path.dirname(__lowerCamelCase )
UpperCAmelCase_ : Dict = os.path.basename(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = onnx.load(os.path.join(__lowerCamelCase, __lowerCamelCase ) )
UpperCAmelCase_ : Dict = list(model.graph.initializer )
UpperCAmelCase_ : Any = set()
UpperCAmelCase_ : List[Any] = {}
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : str = 0
for i in range(len(__lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1, len(__lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i], inits[j] ):
dup_set.add(__lowerCamelCase )
dup_set.add(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = inits[j].data_type
UpperCAmelCase_ : int = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: ", __lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase_ : Optional[Any] = inits[i].name
UpperCAmelCase_ : Dict = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__lowerCamelCase )
else:
UpperCAmelCase_ : Optional[Any] = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB" )
UpperCAmelCase_ : str = sorted(__lowerCamelCase )
_remove_dup_initializers_from_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = "optimized_" + model_file_name
UpperCAmelCase_ : int = os.path.join(__lowerCamelCase, __lowerCamelCase )
onnx.save(__lowerCamelCase, __lowerCamelCase )
return new_model
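# A minimal sketch (not part of the original snippet): the per-element byte sizes the
# dedup accounting above assumes, keyed by onnx.TensorProto data_type enums
# (1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE). The helper name is hypothetical.
_DTYPE_BYTES = {1: 4, 6: 4, 7: 8, 11: 8}

def _tensor_bytes(dims, dtype):
    return int(numpy.prod(dims)) * _DTYPE_BYTES.get(dtype, 0)

assert _tensor_bytes((1024, 1024), 1) == 4 * 1024 * 1024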
| 365 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1_51_47_45 ) < 1E-3
            assert np.abs(np.abs(lowercase_ , dtype=np.float32 ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05_65_24_01) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        UpperCAmelCase_ : Optional[int] = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
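    # A minimal, non-executed sketch of the data-parallel pattern the tests above
    # rely on (it assumes the same module-level imports the tests use: jax,
    # flax.jax_utils.replicate and flax.training.common_utils.shard): the weights
    # are replicated once per device, while prompt ids and RNG keys are split and
    # sharded so each device denoises its own batch slice under jit=True (pmap).
    def _sharded_inference_sketch(self, pipeline, params, prompt, num_inference_steps=50):
        num_devices = jax.device_count()
        prompt_ids = pipeline.prepare_inputs(num_devices * [prompt])
        params = replicate(params)  # one copy of the weights per device
        rng = jax.random.split(jax.random.PRNGKey(0), num_devices)
        prompt_ids = shard(prompt_ids)  # leading axis -> (num_devices, batch_per_device, ...)
        return pipeline(prompt_ids, params, rng, num_inference_steps, jit=True).images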
| 23 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=18 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
UpperCAmelCase_ : Any = size if size is not None else {"shortest_edge": 18}
UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : Dict = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : Any = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : List[Any] = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : List[Any] = image_std
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = LevitImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , "image_mean" ) )
self.assertTrue(hasattr(lowercase_ , "image_std" ) )
self.assertTrue(hasattr(lowercase_ , "do_normalize" ) )
self.assertTrue(hasattr(lowercase_ , "do_resize" ) )
self.assertTrue(hasattr(lowercase_ , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase_ , "size" ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : str = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : Dict = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
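    # A minimal usage sketch (not a test): with the defaults exercised above, the
    # resize -> center-crop -> normalize path maps any input image to a fixed
    # (1, num_channels, 18, 18) tensor. `image` may be a PIL image, numpy array,
    # or torch tensor, as the three tests above demonstrate.
    def _preprocess_sketch(self, image):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        pixel_values = image_processing(image, return_tensors="pt").pixel_values
        assert pixel_values.shape == (1, 3, 18, 18)
        return pixel_values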
| 366 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
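# A standalone sketch (not used by the demo below): the same grid search with a
# binary heap instead of re-sorting the whole open list every iteration, which
# drops the per-step cost from O(n log n) to O(log n). It assumes the
# module-level `grid`, `delta` and `HEURISTIC` defined above; positions are
# (y, x) tuples.
def a_star_heapq(start: tuple[int, int], goal: tuple[int, int]) -> list[tuple[int, int]]:
    import heapq

    def heuristic(y: int, x: int) -> float:
        dy, dx = y - goal[0], x - goal[1]
        return abs(dx) + abs(dy) if HEURISTIC == 1 else sqrt(dx**2 + dy**2)

    # heap entries are (f, g, position, path) tuples ordered by f = g + h
    open_heap = [(heuristic(*start), 0, start, [start])]
    closed: set[tuple[int, int]] = set()
    while open_heap:
        _, g, (y, x), path = heapq.heappop(open_heap)  # O(log n) pop-min
        if (y, x) == goal:
            return path
        if (y, x) in closed:
            continue
        closed.add((y, x))
        for dy, dx in delta:
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0:
                heapq.heappush(
                    open_heap, (g + 1 + heuristic(ny, nx), g + 1, (ny, nx), path + [(ny, nx)])
                )
    return [start]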
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
_a = BidirectionalAStar(init, goal)
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_a = logging.get_logger(__name__)
_a = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_a = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_a = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_a = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_a = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_a = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_a = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_a = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_a = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_a = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_a = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_a = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_a = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_a = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = FLAX_MODEL_MAPPING
_a = auto_class_update(FlaxAutoModel)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_a = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_a = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_a = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_a = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_a = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_a = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_a = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_a = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
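# A minimal usage sketch (checkpoint name illustrative; nothing below runs at
# import time): the lazy mappings above let the auto classes dispatch on a
# checkpoint's config type, so a BERT checkpoint resolves to FlaxBertModel.
def _flax_auto_usage_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = FlaxAutoModel.from_pretrained("bert-base-uncased")  # -> FlaxBertModel
    inputs = tokenizer("Hello world", return_tensors="np")
    return model(**inputs)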
| 367 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        # an earlier version of set_timesteps() caused an error when indexing alphas with num_inference_steps as a power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
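    # A condensed sketch of the two-phase sampling loop that full_loop above
    # exercises: PNDM first runs the Runge-Kutta warm-up steps (prk_timesteps)
    # to build up a residual history, then switches to the 4th-order linear
    # multistep phase (plms_timesteps), one model prediction per step. `model`
    # stands in for any noise-predicting UNet.
    def _pndm_loop_sketch(self, model, sample):
        scheduler = PNDMScheduler(**self.get_scheduler_config())
        scheduler.set_timesteps(10)
        for t in scheduler.prk_timesteps:
            sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
        for t in scheduler.plms_timesteps:
            sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample
        return sample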
| 23 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionControlNetImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCAmelCase_ : str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ : str = CLIPTextModel(lowercase_ )
UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : List[str] = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : int = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , )
UpperCAmelCase_ : int = floats_tensor(control_image.shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : str = Image.fromarray(np.uinta(lowercase_ ) ).convert("RGB" ).resize((64, 64) )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionControlNetImgaImgPipeline
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
SCREAMING_SNAKE_CASE__ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowercase_ ):
if isinstance(lowercase_ , torch.nn.Convad ):
                torch.nn.init.normal_(m.weight )
m.bias.data.fill_(1.0 )
UpperCAmelCase_ : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ : Optional[int] = CLIPTextModel(lowercase_ )
UpperCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = MultiControlNetModel([controlneta, controlneta] )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : int = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Any = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
]
UpperCAmelCase_ : str = floats_tensor(control_image[0].shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uinta(lowercase_ ) ).convert("RGB" ).resize((64, 64) )
UpperCAmelCase_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
UpperCAmelCase_ : Any = 10.0
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : str = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : str = steps
UpperCAmelCase_ : Optional[int] = scale
UpperCAmelCase_ : Union[str, Any] = pipe(**lowercase_ )[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = steps
UpperCAmelCase_ : List[Any] = scale
UpperCAmelCase_ : Union[str, Any] = pipe(**lowercase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCAmelCase_ : str = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = steps
UpperCAmelCase_ : Union[str, Any] = scale
UpperCAmelCase_ : int = pipe(**lowercase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = steps
UpperCAmelCase_ : Union[str, Any] = scale
UpperCAmelCase_ : Union[str, Any] = pipe(**lowercase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowercase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
UpperCAmelCase_ : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=lowercase_ , controlnet=lowercase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : List[str] = "evil space-punk bird"
UpperCAmelCase_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
UpperCAmelCase_ : Optional[int] = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
UpperCAmelCase_ : Union[str, Any] = pipe(
lowercase_ , lowercase_ , control_image=lowercase_ , generator=lowercase_ , output_type="np" , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase_ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
UpperCAmelCase_ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
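    # A condensed sketch of the test above, kept for reference (URLs, prompt and
    # hyper-parameters are copied from it): a canny-edge ControlNet conditions
    # img2img generation from an init image plus its edge map.
    def _controlnet_img2img_sketch(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(
            "evil space-punk bird",
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        return output.images[0]  # (512, 512, 3) ndarray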
| 368 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def __a ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
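# A toy illustration (parameter names hypothetical) of how the function above is
# applied, assuming it keeps its original name `set_partitions`: every leaf of
# the flattened parameter tree is matched against the regex rules, producing a
# PartitionSpec pytree of the same shape.
#
#   params = {"transformer": {"wte": {"embedding": w}, "ln_f": {"scale": s, "bias": b}}}
#   specs = set_partitions(params)
#   # ("transformer", "wte", "embedding") matches the embedding rule -> P("mp", None)
#   # ("transformer", "ln_f", "scale") and ("transformer", "ln_f", "bias") -> None (replicated)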
| 23 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __a ( __lowerCamelCase ) -> List[str]:
UpperCAmelCase_ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ) -> Any:
UpperCAmelCase_ : List[str] = emb.weight.shape
UpperCAmelCase_ : List[str] = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase )
UpperCAmelCase_ : Any = emb.weight.data
return lin_layer
def __a ( __lowerCamelCase, __lowerCamelCase=None ) -> int:
UpperCAmelCase_ : str = {}
for old_key in state_dict.keys():
UpperCAmelCase_ : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase_ : str = key.replace("moe_layer.experts.0", f"""ffn.experts.expert_{expert_idx}""" )
else:
UpperCAmelCase_ : Union[str, Any] = key.replace("moe_layer.experts.", "ffn.experts.expert_" )
if "gate" in key:
UpperCAmelCase_ : int = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
UpperCAmelCase_ : Tuple = key.replace(".fc2.", ".ffn.fc2." )
if "fc1" and "experts" not in key:
UpperCAmelCase_ : str = key.replace(".fc1.", ".ffn.fc1." )
if ".encoder_attn." in key:
UpperCAmelCase_ : Any = key.replace(".encoder_attn.", ".cross_attention." )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase_ : str = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm" )
if "final_layer_norm" in key:
UpperCAmelCase_ : Optional[Any] = key.replace("final_layer_norm", "ff_layer_norm" )
UpperCAmelCase_ : str = state_dict[old_key]
return new_dict
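# Illustration (keys hypothetical, following the fairseq NLLB-MoE layout): for
# expert_idx=3 the renaming above maps, e.g.
#   "layers.0.moe_layer.experts.0.fc1.weight" -> "layers.0.ffn.experts.expert_3.fc1.weight"
#   "layers.0.moe_layer.gate.wg.weight"       -> "layers.0.ffn.router.classifier.weight"
#   "layers.0.encoder_attn.k_proj.weight"     -> "layers.0.cross_attention.k_proj.weight"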
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
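# Size accounting sketch (hypothetical tensor, for illustration only): `dtype_byte_size`
# returns the bytes per element of a dtype, e.g. 4 for torch.float32, so a (1024, 1024)
# fp32 weight adds 1024 * 1024 * 4 = 4_194_304 bytes to `total_size`.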
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--nllb_moe_checkpoint_path',
        default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
| 369 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(self, directory, identifier=None, ignore_files=None, n_identifier=None, only_modules=True):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 23 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = (
    '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. '
    'Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
)
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 370 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
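# Illustrative call with toy numpy arrays (not real GLUE predictions):
#
#     import numpy as np
#     glue_compute_metrics("sst-2", np.array([1, 0, 1]), np.array([1, 1, 1]))
#     # -> {"acc": 0.666...}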
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
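# Worked example: add_three(1, 2, 1, 3, 1, 6) computes 1/2 + 1/3 + 1/6 = 36/36,
# reduces by gcd(36, 36) = 36, and returns (1, 1).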
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 371 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
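# Usage sketch (assumes a local character-level vocab.json; not an official example):
#
#     tokenizer = MgpstrTokenizer("vocab.json")
#     tokenizer._tokenize("hello")  # -> ["h", "e", "l", "l", "o"], one token per character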
| 23 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 350 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=index_zero, reverse=True)
    sorted_freq_to_letters: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letters)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
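# Worked example: in "HELLO WORLD" the most frequent letters are L (3) and O (2), so
# get_frequency_order starts with "LO"; english_freq_match_score then counts how many
# of ETAOIN's six most/least common letters land in the same ends of that order.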
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
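# Note: with the default do_backward=True the loss goes through `accelerator.backward`,
# letting Accelerate handle scaling/syncing; passing False runs the manual baseline,
# dividing by gradient_accumulation_steps and calling plain `loss.backward()`.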
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1E-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                    test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase_ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 23 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
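# Worked example: merge_sort([5, 3, 8, 1]) peels off (min, max) pairs, giving
# start=[1, 3] and end=[8, 5]; reversing `end` yields [5, 8], so the result is [1, 3, 5, 8].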
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 352 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
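# Worked example (movq scale factor 8): downscale_height_and_width(768, 768) gives
# 768 // 64 = 12 per side, scaled back by 8 -> a (96, 96) latent grid for a 768x768 decode.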
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(F"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(F"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(_a)  # `_a` holds the example docstring defined at module level above
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
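# Usage sketch for the decoder pipeline above (assumption: it is a
# Kandinsky-style decoder that consumes image embeddings produced by a
# separate prior model; the embedding width of 1280 and the values below
# are illustrative, not taken from this file):
#
#   image_embeds = torch.randn(1, 1280)
#   negative_image_embeds = torch.zeros_like(image_embeds)
#   image = pipe(
#       image_embeds,
#       negative_image_embeds,
#       height=512,
#       width=512,
#       num_inference_steps=100,
#       guidance_scale=4.0,
#   ).images[0]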
| 23 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = checkpoint
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : int = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ : Optional[int] = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ : Any = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ : Dict = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ : str = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ : Optional[Any] = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ : Dict = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ : Optional[Any] = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ : Tuple = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ : Tuple = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ : Union[str, Any] = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ : str = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ : List[str] = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ : int = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ : Dict = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ : List[Any] = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ : Optional[int] = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ : str = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ : int = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ : Dict = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
for i in range(__lowerCamelCase ):
UpperCAmelCase_ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Dict = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ : List[Any] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ : str = renew_vae_resnet_paths(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
UpperCAmelCase_ : List[Any] = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ : List[Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : Optional[int] = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ : List[Any] = renew_vae_resnet_paths(__lowerCamelCase )
UpperCAmelCase_ : Dict = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
UpperCAmelCase_ : Any = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ : Any = renew_vae_attention_paths(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
for i in range(__lowerCamelCase ):
UpperCAmelCase_ : str = num_up_blocks - 1 - i
UpperCAmelCase_ : Optional[Any] = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Any = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ : Optional[Any] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(__lowerCamelCase )
UpperCAmelCase_ : Tuple = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
UpperCAmelCase_ : Dict = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ : int = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : Optional[Any] = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ : Any = renew_vae_resnet_paths(__lowerCamelCase )
UpperCAmelCase_ : Any = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ : Optional[Any] = renew_vae_attention_paths(__lowerCamelCase )
UpperCAmelCase_ : str = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, additional_replacements=[meta_path], config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
return new_checkpoint
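# Illustration of the renaming performed above (toy key, assumed layout):
#   "encoder.down.0.block.1.norm1.weight"
#       -> "encoder.down_blocks.0.resnets.1.norm1.weight"
# via meta_path {"old": "down.0.block", "new": "down_blocks.0.resnets"},
# while "mid.attn_1" keys collapse into "mid_block.attentions.0".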
def __a ( __lowerCamelCase, __lowerCamelCase, ):
# Only support V1
UpperCAmelCase_ : int = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ : Dict = io.BytesIO(r.content )
UpperCAmelCase_ : Tuple = OmegaConf.load(__lowerCamelCase )
UpperCAmelCase_ : int = 512
UpperCAmelCase_ : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ : Union[str, Any] = {}
with safe_open(__lowerCamelCase, framework="pt", device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ : Union[str, Any] = f.get_tensor(__lowerCamelCase )
else:
UpperCAmelCase_ : Union[str, Any] = torch.load(__lowerCamelCase, map_location=__lowerCamelCase )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ : Any = create_vae_diffusers_config(__lowerCamelCase, image_size=__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = custom_convert_ldm_vae_checkpoint(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : str = AutoencoderKL(**__lowerCamelCase )
vae.load_state_dict(__lowerCamelCase )
vae.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
_a = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
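# Example invocation (paths are illustrative):
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.pt \
#       --dump_path ./vae-diffusers
# The resulting directory can then be reloaded with
# AutoencoderKL.from_pretrained("./vae-diffusers").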
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """detr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = backbone_config.get("model_type" )
UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = num_queries
UpperCAmelCase_ : Union[str, Any] = d_model
UpperCAmelCase_ : str = encoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : Optional[Any] = init_xavier_std
UpperCAmelCase_ : Optional[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : int = auxiliary_loss
UpperCAmelCase_ : Optional[Any] = position_embedding_type
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Dict = dilation
# Hungarian matcher
UpperCAmelCase_ : Union[str, Any] = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : str = mask_loss_coefficient
UpperCAmelCase_ : Any = dice_loss_coefficient
UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase_ : List[str] = giou_loss_coefficient
UpperCAmelCase_ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
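# Usage sketch (assumption: the two classes above correspond to transformers'
# DetrConfig and DetrOnnxConfig; the hyperparameters below are illustrative):
#
#   config = DetrConfig(num_queries=50, encoder_layers=4, decoder_layers=4)
#   assert config.hidden_size == config.d_model  # via attribute_map
#   onnx_config = DetrOnnxConfig(config)
#   print(onnx_config.inputs)               # dynamic axes for pixel_values / pixel_mask
#   print(onnx_config.atol_for_validation)  # 1e-5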
| 23 | 0 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCAmelCase_ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase_ : List[str] = torch.load(__lowerCamelCase, map_location="cpu" )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase_ : Optional[int] = convert_pytorch_state_dict_to_flax(__lowerCamelCase, __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase_ : Optional[int] = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase, __lowerCamelCase )
return flax_state_dict
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase_ : int = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase_ : Optional[Any] = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase_ : int = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
UpperCAmelCase_ : Any = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
UpperCAmelCase_ : int = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_ : List[str] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase_ : int = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase_ : Optional[Any] = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase_ : str = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCAmelCase_ : Dict = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
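# Layout note for the conv rule above: a PyTorch Conv2d weight is stored as
# (out_channels, in_channels, kH, kW), while a Flax Conv kernel is
# (kH, kW, in_channels, out_channels); transpose(2, 3, 1, 0) maps the former
# to the latter, and the plain .T handles the (out, in) -> (in, out) flip
# for linear layers.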
def __a ( __lowerCamelCase, __lowerCamelCase ):
# convert pytorch tensor to numpy
UpperCAmelCase_ : List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase_ : List[str] = flax_model.params["params"]
else:
UpperCAmelCase_ : List[str] = flax_model.params
UpperCAmelCase_ : Tuple = flatten_dict(__lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ : Optional[int] = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(__lowerCamelCase )
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : int = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : int = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ : List[str] = rename_key_and_reshape_tensor(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# add model prefix if necessary
UpperCAmelCase_ : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase_ : Tuple = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase, __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : Dict = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import torch
# Load the index
UpperCAmelCase_ : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase_ : Tuple = torch.load(__lowerCamelCase )
UpperCAmelCase_ : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ : Optional[Any] = flax_model.params["params"]
UpperCAmelCase_ : Union[str, Any] = flatten_dict(__lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCAmelCase_ : str = flax_model.params
UpperCAmelCase_ : Any = flatten_dict(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ : str = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : List[Any] = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ : str = rename_key_and_reshape_tensor(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# add model prefix if necessary
UpperCAmelCase_ : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase_ : List[Any] = jnp.asarray(__lowerCamelCase )
continue
if "var" in flax_key[-1]:
UpperCAmelCase_ : List[str] = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase, __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : Optional[int] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : List[str] = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase_ : List[str] = getattr(__lowerCamelCase, "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCamelCase, "rb" ) as state_f:
try:
UpperCAmelCase_ : List[str] = from_bytes(__lowerCamelCase, state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCAmelCase_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16, __lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCAmelCase_ : Any = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params, __lowerCamelCase )
UpperCAmelCase_ : Tuple = flatten_dict(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = pt_model.state_dict()
UpperCAmelCase_ : Tuple = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase_ : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase_ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase_ : Optional[Any] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : List[str] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
UpperCAmelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ : str = jnp.transpose(__lowerCamelCase, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ : Any = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase_ : Tuple = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCAmelCase_ : Optional[Any] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase_ : Any = ".".join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase_ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase_ : Union[str, Any] = key.split("." )
UpperCAmelCase_ : int = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase_ : List[str] = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase_ : str = key_components[-2] + "_v"
if name is not None:
UpperCAmelCase_ : Optional[int] = key_components[:-3] + [name]
UpperCAmelCase_ : Tuple = ".".join(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = key
if flax_key in special_pt_names:
UpperCAmelCase_ : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase_ : Any = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase, np.ndarray ) else flax_tensor
UpperCAmelCase_ : str = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
UpperCAmelCase_ : List[str] = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__lowerCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"If your task is similar to the task the model of the checkpoint was trained on, "
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
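# Minimal shape check for the Flax -> PyTorch kernel direction used above
# (assumption: standalone NumPy illustration, no framework models involved):
#
#   import numpy as np
#   flax_kernel = np.zeros((5, 5, 3, 8))           # (kH, kW, in, out)
#   pt_weight = flax_kernel.transpose(3, 2, 0, 1)  # (out, in, kH, kW)
#   assert pt_weight.shape == (8, 3, 5, 5)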
| 355 |
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_a = [None] * 10_000_000
_a = True
_a = False
def __a ( __lowerCamelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) )
UpperCAmelCase_ : List[str] = number_chain
while number < 1000_0000:
UpperCAmelCase_ : List[Any] = number_chain
number *= 10
return number_chain
def __a ( __lowerCamelCase = 1000_0000 ):
for i in range(1, __lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
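# Worked example for the digit-square step: 86 -> 8**2 + 6**2 = 100 -> 1,
# so 86 belongs to the chain ending at 1, while 85 -> 89 -> 145 -> 42 -> 20
# -> 4 -> 16 -> 37 -> 58 -> 89 loops back to 89. solution() counts how many
# starting values below ten million reach the 89 loop.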
| 23 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
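# To run this check in isolation (file and test names are illustrative):
#   python -m pytest tests/test_optimizer.py -q
# The @require_cpu marker restricts it to CPU-only runs, and the final
# AcceleratorState._reset_state() keeps accelerator state from leaking
# into other tests.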
| 356 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Return True if there is node that has not iterated.
UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase )
UpperCAmelCase_ : Any = []
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Tuple = True
while queue:
UpperCAmelCase_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = u
return visited[t]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# This array is filled by BFS and to store path
UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase ))
UpperCAmelCase_ : Any = 0
while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = float("Inf" )
UpperCAmelCase_ : Tuple = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] )
UpperCAmelCase_ : Dict = parent[s]
max_flow += path_flow
UpperCAmelCase_ : Optional[Any] = sink
while v != source:
UpperCAmelCase_ : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ : Optional[int] = parent[v]
return max_flow
_a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a = 0, 5
print(ford_fulkerson(graph, source, sink))
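# For the sample capacity matrix above (the classic CLRS flow network),
# this prints a maximum flow of 23 from source 0 to sink 5
# (e.g. 12 along 0-1-3-5, 4 along 0-2-4-5, and 7 along 0-2-4-3-5).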
| 23 | 0 |