| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info_config(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
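
# Added usage sketch (not part of the original test file): the same inspection
# API can be driven directly, assuming network access to the Hugging Face Hub:
#
#     from datasets import get_dataset_config_names, inspect_dataset
#
#     inspect_dataset("squad", "/tmp/squad_inspect")  # writes squad.py locally
#     print(get_dataset_config_names("squad"))        # e.g. ["plain_text"]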
| 685 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs through all files in `directory` and runs their doctests."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 685 | 1 |
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 685 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
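
    # Added sanity check (not in the original file): for n = 10 the sum of the
    # first ten squares is 385 and the square of their sum is 3025, so the
    # difference is 2640.
    assert solution(10) == 3025 - 385 == 2640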
| 685 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
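
# Added note (not in the original file): after the `sys.modules` swap above the
# package loads lazily; attribute access triggers the real import, e.g.:
#
#     import transformers.models.speech_encoder_decoder as sed
#
#     sed.SpeechEncoderDecoderConfig  # only now is the configuration module imported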
| 685 |
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solves an ODE y' = f(x, y) with the modified Euler (Heun) method:
    a forward-Euler predictor followed by a trapezoidal corrector."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
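
    # Added usage sketch (not in the original file): integrating y' = y from
    # x = 0 to 1 with y(0) = 1; the method is second order, so with h = 0.1 the
    # endpoint lands within about 5e-3 of e.
    import math

    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    assert abs(y[-1] - math.e) < 1e-2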
| 685 |
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for the Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Creates a beta schedule that discretizes the given alpha_bar function
    (here the Glide cosine schedule), capping each beta at `max_beta`."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
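
# Added illustration (not in the original file) of the cosine-schedule helper
# above; every beta is capped at max_beta:
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert float(betas.max()) <= 0.999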
| 685 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
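
    # Added sanity check (not in the original file): below 10 the multiples of
    # 3 or 5 are 3, 5, 6 and 9, which sum to 23.
    assert solution(10) == 3 + 5 + 6 + 9 == 23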
| 685 | 1 |
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 685 |
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes returning the list of primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sums the semidivisible numbers below `limit`: numbers divisible by
    exactly one of lps(n) (largest prime <= sqrt(n)) and ups(n) (smallest
    prime >= sqrt(n))."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
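
    # Added sanity check for the sieve helper (not in the original file):
    assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]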
| 685 | 1 |
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 685 |
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
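
# Added usage sketch (not in the original file); every path, file name and
# model name below is a placeholder:
#
#     python convert_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./classifier_config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model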
| 685 | 1 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculates the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
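
    # Added worked example (not in the original file): evaluating the formulas
    # above by hand for 2023 gives days_to_add = 15 and days_from_phm_to_sunday
    # = 3, i.e. March 22 + 18 days, so Easter 2023 falls on April 9.
    assert gauss_easter(2023) == datetime(2023, 4, 9)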
| 685 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")

                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of InputFeatures."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-ๅนด])*(0?[1-9]|1[0-2])[/\-ๆ]((0?[1-9]|[12][0-9]|3[01])ๆฅ?)*(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"(ๆๆฒป|ๅคงๆญฃ|ๆญๅ|ๅนณๆ|ไปคๅ|ใพ|ใฝ|ใผ|ใป|\u32ff)\d{1,2}ๅนด(0?[1-9]|1[0-2])ๆ(0?[1-9]|[12][0-9]|3[01])ๆฅ(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ไธ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(ๅๅ|ไธๅ|ๅไธๅ|ๅ|ๅใใซ|ไธใใซ|ๅไธใใซ|ใใซ|ๅใฆใผใญ|ไธใฆใผใญ|ๅไธใฆใผใญ|ใฆใผใญ)+(\(็จ่พผ\)|\(็จๆ\)|\+tax)*")
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟ"
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโ"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace("ใ" , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("โ" , "ใผ")
a_ = text.replace("โ" , "ใผ")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(x) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0xE28080 and c <= 0xE2B07F:
return True
return False
a_ = 0
a_ = []
while pos < len(text):
a_ = min(len(text) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(end , pos , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(wd) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(candidates) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(candidates , key=lambda x: x[0])[0]
result.append(wd)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(wd):
result.append("<KIGOU>")
elif checkuae(wd):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(breakline)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("โ")
elif word == "<KIGOU>":
words.append("ว")
elif word == "<U2000U2BFF>":
words.append("โ")
else:
words.append(word)
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
a_ = "".join(words)
return text
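if __name__ == "__main__":
    # Standalone round-trip (added by the editor) of the byte-fallback scheme used by
    # the tokenizer above: out-of-vocabulary text is emitted as "<|byte N|>" tokens
    # and reassembled with bytearray() on decode.
    sample = "日本語"
    byte_tokens = ["<|byte%d|>" % b for b in sample.encode("utf-8")]
    recovered = bytearray(int(t[6:-2]) for t in byte_tokens).decode("utf-8", errors="replace")
    assert recovered == sample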
| 685 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =FunnelTokenizer
_UpperCAmelCase =FunnelTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: str) ->Tuple:
'''simple docstring'''
super().setUp()
a_ = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Tuple , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: int , **a: Any) ->str:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a)
def _lowerCAmelCase ( self: Tuple , a: str) ->int:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: int) ->Any:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
a_ = self.get_tokenizers(do_lower_case=a)
for tokenizer in tokenizers:
a_ = tokenizer("UNwant\u00E9d,running")
a_ = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
a_ = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len)
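if __name__ == "__main__":
    # Standalone illustration (added by the editor) of the token_type_ids pattern the
    # test above asserts: Funnel marks the <cls> position with segment id 2, unlike
    # BERT's 0/1-only scheme.
    sentence_len = 5
    single = [2] + [0] * sentence_len
    pair = [2] + [0] * sentence_len + [1] * sentence_len
    assert pair[: len(single)] == single and pair[-1] == 1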
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> List[Any]:
'''simple docstring'''
try:
with open(lowercase__ ,"rb" ) as flax_state_f:
a_ = from_bytes(lowercase__ ,flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowercase__ ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(lowercase__ ,lowercase__ )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
a_ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 ,lowercase__ ) ).values()
if any(lowercase__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
a_ = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params ,lowercase__ )
a_ = ""
a_ = flatten_dict(lowercase__ ,sep="." )
a_ = pt_model.state_dict()
# keep track of unexpected & missing keys
a_ = []
a_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
a_ = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
a_ = flax_key_tuple_array[:-1] + ["weight"]
a_ = jnp.transpose(lowercase__ ,(3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
a_ = flax_key_tuple_array[:-1] + ["weight"]
a_ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
a_ = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowercase__ ):
a_ = (
flax_key_tuple_string.replace("_0" ,".0" )
.replace("_1" ,".1" )
.replace("_2" ,".2" )
.replace("_3" ,".3" )
.replace("_4" ,".4" )
.replace("_5" ,".5" )
.replace("_6" ,".6" )
.replace("_7" ,".7" )
.replace("_8" ,".8" )
.replace("_9" ,".9" )
)
a_ = ".".join(lowercase__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
a_ = np.asarray(lowercase__ ) if not isinstance(lowercase__ ,np.ndarray ) else flax_tensor
a_ = torch.from_numpy(lowercase__ )
# remove from missing keys
missing_keys.remove(lowercase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowercase__ )
pt_model.load_state_dict(lowercase__ )
# re-transform missing_keys to list
a_ = list(lowercase__ )
if len(lowercase__ ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(lowercase__ ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
return pt_model
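if __name__ == "__main__":
    # Standalone shape check (added by the editor) of the two kernel-transpose rules in
    # the converter above: Flax Dense kernels are laid out (in, out) and Conv kernels
    # (H, W, in, out), while PyTorch expects (out, in) and (out, in, H, W).
    dense_kernel = np.zeros((3, 5))
    conv_kernel = np.zeros((3, 3, 16, 32))
    assert dense_kernel.T.shape == (5, 3)
    assert np.transpose(conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)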
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
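    # Standalone check (added by the editor) of the 6k +/- 1 trial division used above,
    # written out independently because the obfuscated names in this file may not resolve.
    def _is_prime(n: int) -> bool:
        if n < 2:
            return False
        if n < 4:
            return True  # 2 and 3
        if n % 2 == 0 or n % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(n)) + 1, 6):
            if n % i == 0 or n % (i + 2) == 0:
                return False
        return True

    assert [p for p in range(30) if _is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]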
| 685 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
a_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
a_ = '▁'
# Segments (not really needed)
a_ = 0
a_ = 1
a_ = 2
a_ = 3
a_ = 4
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase ='''left'''
_UpperCAmelCase =XLNetTokenizer
def __init__( self: List[str] , a: List[Any]=None , a: str=None , a: Optional[Any]=False , a: Optional[int]=True , a: List[Any]=False , a: Tuple="<s>" , a: Optional[Any]="</s>" , a: str="<unk>" , a: List[Any]="<sep>" , a: List[Any]="<pad>" , a: Dict="<cls>" , a: Tuple="<mask>" , a: List[str]=["<eop>", "<eod>"] , **a: Dict , ) ->Dict:
'''simple docstring'''
a_ = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
super().__init__(
vocab_file=a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , **a , )
a_ = 3
a_ = do_lower_case
a_ = remove_space
a_ = keep_accents
a_ = vocab_file
a_ = False if not self.vocab_file else True
def _lowerCAmelCase ( self: Union[str, Any] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self: Union[str, Any] , a: List[int] , a: Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
a_ = [self.sep_token_id]
a_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCAmelCase ( self: Dict , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(a):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(a):
copyfile(self.vocab_file , a)
return (out_vocab_file,)
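if __name__ == "__main__":
    # Standalone mock (added by the editor) of the sequence-pair layout built above:
    # XLNet appends <sep> and <cls> at the END, and the <cls> position gets segment id 2.
    ids_a, ids_b, sep, cls = [11, 12], [21], [5], [6]
    pair = ids_a + sep + ids_b + sep + cls
    type_ids = len(ids_a + sep) * [0] + len(ids_b + sep) * [1] + [2]
    assert len(pair) == len(type_ids) == 6 and type_ids[-1] == 2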
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
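if __name__ == "__main__":
    # Minimal usage sketch (added by the editor; assumes the `transformers` package is
    # installed, and uses its public names rather than the obfuscated class above):
    from transformers import VisualBertConfig
    cfg = VisualBertConfig(visual_embedding_dim=512)
    assert cfg.model_type == "visual_bert" and cfg.visual_embedding_dim == 512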
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
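if __name__ == "__main__":
    # Standalone sketch (added by the editor) of the lazy-import idea behind _LazyModule:
    # defer the real import until the first attribute access.
    import importlib

    class _Lazy:
        def __init__(self, name: str) -> None:
            self._name = name
            self._mod = None

        def __getattr__(self, attr: str):
            if self._mod is None:
                self._mod = importlib.import_module(self._name)
            return getattr(self._mod, attr)

    lazy_json = _Lazy("json")
    assert lazy_json.dumps({"ok": 1}) == '{"ok": 1}'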
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
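    # Standalone check (added by the editor): heapq pops the smallest (distance, cell)
    # tuple first, which is what lets the loop above settle near cells before far ones.
    import heapq
    pq = [(2, (1, 1)), (1, (1, 0)), (1, (0, 1))]
    heapq.heapify(pq)
    assert heapq.heappop(pq) == (1, (0, 1))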
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000000 ) -> int:
'''simple docstring'''
a_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
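    # Standalone check (added by the editor) of the totient sieve above on a tiny limit:
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so the answer for limit=8 is 21.
    small_limit = 8
    small_phi = [i - 1 for i in range(small_limit + 1)]
    for i in range(2, small_limit + 1):
        if small_phi[i] == i - 1:  # i is prime
            for j in range(2 * i, small_limit + 1, i):
                small_phi[j] -= small_phi[j] // i
    assert sum(small_phi[2 : small_limit + 1]) == 21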
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
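    # Illustrative invocation (added by the editor; the script name and all paths are
    # placeholders, not taken from the original file):
    #
    #   python convert_mluke_checkpoint.py \
    #       --checkpoint_path ./mluke/pytorch_model.bin \
    #       --metadata_path ./mluke/metadata.json \
    #       --entity_vocab_path ./mluke/entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base-converted \
    #       --model_size base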
| 685 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a_ = 'facebook/wmt19-en-de'
a_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
a_ = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
a_ = tokenizer(['Making tiny model'], return_tensors='pt')
a_ = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
a_ = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsรฉ."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowercase__ ), magnitude * sin(lowercase__ )]
return [magnitude * cos(radians(lowercase__ ) ), magnitude * sin(radians(lowercase__ ) )]
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ = 10**-1 ) -> bool:
'''simple docstring'''
a_ = cross(lowercase__ ,lowercase__ )
a_ = sum(lowercase__ )
return abs(lowercase__ ) < eps
if __name__ == "__main__":
# Test to check if it works
a_ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
a_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
a_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
a_ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
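# Illustrative extra case (added by the editor; reuses the convenience name this file
# already calls): two equal and opposite forces at one point are in equilibrium.
assert in_static_equilibrium(array([[10, 0], [-10, 0]]), array([[0, 0], [0, 0]]))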
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
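    # Standalone checks (added by the editor; the sample numbers are made up): the
    # pattern accepts 0 / 94 / +94 / 0094 prefixes followed by a 7x mobile block.
    import re
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
    assert pattern.search("0094702343221")
    assert not pattern.search("0793123456")  # 9 is not a valid second digit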
| 685 | 1 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
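    # Standalone sketch (added by the editor) of the sort key used above: mapping
    # entries are ordered by the first quoted identifier on each line.
    ident = re.compile(r'\s*\(\s*"(\S[^"]+)"')
    entries = ['    ("bert", "BertConfig"),', '    ("albert", "AlbertConfig"),']
    entries.sort(key=lambda line: ident.search(line).groups()[0])
    assert ident.search(entries[0]).groups()[0] == "albert"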
| 685 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: int , *a: Tuple , **a: Union[str, Any]) ->None:
'''simple docstring'''
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
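# Minimal usage sketch (added by the editor; assumes `timm` and `transformers` are
# installed; these are the library's public names, not the obfuscated class above):
#
#   from transformers import TimmBackbone, TimmBackboneConfig
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # pixel_values: (B, 3, H, W)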
| 685 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =['''pixel_values''']
def __init__( self: Dict , a: bool = True , a: Dict[str, int] = None , a: PILImageResampling = PILImageResampling.BICUBIC , a: bool = True , a: Dict[str, int] = None , a: bool = True , a: Union[int, float] = 1 / 2_55 , a: bool = True , a: Optional[Union[float, List[float]]] = None , a: Optional[Union[float, List[float]]] = None , a: bool = True , **a: Optional[Any] , ) ->None:
'''simple docstring'''
super().__init__(**a)
a_ = size if size is not None else {"shortest_edge": 2_24}
a_ = get_size_dict(a , default_to_square=a)
a_ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
a_ = get_size_dict(a , default_to_square=a , param_name="crop_size")
a_ = do_resize
a_ = size
a_ = resample
a_ = do_center_crop
a_ = crop_size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ = image_std if image_std is not None else OPENAI_CLIP_STD
a_ = do_convert_rgb
def _lowerCAmelCase ( self: Dict , a: np.ndarray , a: Dict[str, int] , a: PILImageResampling = PILImageResampling.BICUBIC , a: Optional[Union[str, ChannelDimension]] = None , **a: Union[str, Any] , ) ->np.ndarray:
'''simple docstring'''
a_ = get_size_dict(a , default_to_square=a)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a_ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a)
return resize(a , size=a , resample=a , data_format=a , **a)
def _lowerCAmelCase ( self: List[Any] , a: np.ndarray , a: Dict[str, int] , a: Optional[Union[str, ChannelDimension]] = None , **a: List[str] , ) ->np.ndarray:
'''simple docstring'''
a_ = get_size_dict(a)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a)
def _lowerCAmelCase ( self: Optional[int] , a: np.ndarray , a: Union[int, float] , a: Optional[Union[str, ChannelDimension]] = None , **a: List[Any] , ) ->int:
'''simple docstring'''
return rescale(a , scale=a , data_format=a , **a)
def _lowerCAmelCase ( self: Optional[int] , a: np.ndarray , a: Union[float, List[float]] , a: Union[float, List[float]] , a: Optional[Union[str, ChannelDimension]] = None , **a: Optional[int] , ) ->np.ndarray:
'''simple docstring'''
return normalize(a , mean=a , std=a , data_format=a , **a)
def _lowerCAmelCase ( self: int , a: ImageInput , a: bool = None , a: Dict[str, int] = None , a: PILImageResampling = None , a: bool = None , a: int = None , a: bool = None , a: float = None , a: bool = None , a: Optional[Union[float, List[float]]] = None , a: Optional[Union[float, List[float]]] = None , a: bool = None , a: Optional[Union[str, TensorType]] = None , a: Optional[ChannelDimension] = ChannelDimension.FIRST , **a: Any , ) ->PIL.Image.Image:
'''simple docstring'''
a_ = do_resize if do_resize is not None else self.do_resize
a_ = size if size is not None else self.size
a_ = get_size_dict(a , param_name="size" , default_to_square=a)
a_ = resample if resample is not None else self.resample
a_ = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ = crop_size if crop_size is not None else self.crop_size
a_ = get_size_dict(a , param_name="crop_size" , default_to_square=a)
a_ = do_rescale if do_rescale is not None else self.do_rescale
a_ = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ = do_normalize if do_normalize is not None else self.do_normalize
a_ = image_mean if image_mean is not None else self.image_mean
a_ = image_std if image_std is not None else self.image_std
a_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ = [convert_to_rgb(a) for image in images]
# All transformations expect numpy arrays.
a_ = [to_numpy_array(a) for image in images]
if do_resize:
a_ = [self.resize(image=a , size=a , resample=a) for image in images]
if do_center_crop:
a_ = [self.center_crop(image=a , size=a) for image in images]
if do_rescale:
a_ = [self.rescale(image=a , scale=a) for image in images]
if do_normalize:
a_ = [self.normalize(image=a , mean=a , std=a) for image in images]
a_ = [to_channel_dimension_format(a , a) for image in images]
a_ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a)
| 685 |
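The final preprocessing method above applies its transforms in a fixed order: resize, center crop, rescale, normalize, then channel reordering. A rough numpy-only illustration of the rescale and normalize steps; the mean/std values are placeholders, not the OPENAI_CLIP constants.
import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
rescaled = image * (1 / 255)  # rescale pixel values into [0, 1]
mean = np.array([0.5, 0.5, 0.5])  # placeholder per-channel statistics
std = np.array([0.5, 0.5, 0.5])
normalized = (rescaled - mean) / std  # per-channel normalization
chw = np.transpose(normalized, (2, 0, 1))  # HWC -> CHW (ChannelDimension.FIRST)
print(chw.shape)  # (3, 224, 224)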
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        '''simple docstring'''
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        '''simple docstring'''
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self) -> str:
        '''simple docstring'''
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queue = []
    def enqueue(self, data: int) -> None:
        '''simple docstring'''
        if len(self.queue) == 1_00:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self) -> str:
        '''simple docstring'''
        return str(self.queue)
def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
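For comparison, the standard library's heapq provides the same fixed-priority behaviour without a hand-rolled list of sub-queues; this sketch (my own, not part of the module above) pairs each item with its priority and an insertion counter so ties stay first-in-first-out.
import heapq
from itertools import count

heap, order = [], count()
for priority, data in [(0, 10), (1, 70), (2, 1), (0, 100)]:
    heapq.heappush(heap, (priority, next(order), data))  # counter breaks ties FIFO
while heap:
    priority, _, data = heapq.heappop(heap)
    print(priority, data)  # 0 10 / 0 100 / 1 70 / 2 1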
def miller_rabin(n, allow_probable=False):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds, 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime, d * 2**r, n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin():
"""simple docstring"""
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 0 |
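The test hinges on writing n - 1 as d * 2**s with d odd, then probing witnesses. A standalone check for n = 561 (a Carmichael number that witness 2 exposes as composite):
n = 561
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
print(d, s, d * 2**s == n - 1)  # 35 4 True
# The witness sequence never hits n - 1 before reaching 1, so 561 is composite.
print([pow(2, d * 2**r, n) for r in range(s)])  # [263, 166, 67, 1]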
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self) -> None:
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685 | 0 |
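MinHash clustering approximates Jaccard similarity between token sets; a tiny exact-Jaccard sketch (my own illustration, not the minhash_deduplication API) shows why the two 'a'-repetition documents in the fixture land in one cluster at a 0.85 threshold:
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("a " * 20, "a " * 30))  # 1.0 -> clustered as duplicates
print(jaccard("a " * 20, "b " * 7))   # 0.0 -> kept apart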
def longest_common_substring(text1: str, text2: str) -> str:
    """simple docstring"""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs' )
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1 ):
        for j in range(1, text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
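A quick spot check of the dynamic programme above, using the reconstructed function name:
print(longest_common_substring("abcdef", "xabded"))  # "ab"
print(longest_common_substring("", "abc"))           # "" (empty input, empty result)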
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 0 |
class PrefixSum:
    """simple docstring"""
    def __init__(self, array: list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
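Usage of the prefix-sum structure above (method names as reconstructed here): constant-time range sums after linear setup, plus the contiguous-subarray sum check.
ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(0, 3))    # 10 -> 1 + 2 + 3 + 4
print(ps.get_sum(1, 2))    # 5  -> 2 + 3
print(ps.contains_sum(6))  # True -> contiguous slice 1 + 2 + 3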
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 0 |
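The analyzer above leans on two standard-library entry points; a self-contained sketch of the module branch (doctests collected into a unittest suite and run):
import doctest
import sys
import unittest

def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b

suite = doctest.DocTestSuite(sys.modules[__name__])  # collect this module's doctests
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0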
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 3 |
'''simple docstring'''
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
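Worked numbers for n = 10: the sum of squares is 385, the square of the sum is 3025, so the difference is 2640. A brute-force cross-check against the closed forms used above:
n = 10
brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
print(brute, solution(10))  # 2640 2640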
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : Optional[Any] = logging.getLogger()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
lowerCAmelCase = {}
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = json.load(_UpperCAmelCase )
else:
raise ValueError(F'can\'t find {path}' )
return results
__UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
lowerCAmelCase = time()
xla_spawn.main()
lowerCAmelCase = time()
lowerCAmelCase = get_results(_snake_case )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
xla_spawn.main()
| 4 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors(n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 5 |
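The divisor count multiplies (exponent + 1) over the prime factorization: 28 = 2**2 * 7 gives (2 + 1) * (1 + 1) = 6 divisors, and 28 is the first triangle number with more than five. A cross-check with the helpers above:
print(count_divisors(28))  # 6 -> divisors 1, 2, 4, 7, 14, 28
print(next(t for t in triangle_number_generator() if count_divisors(t) > 5))  # 28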
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
SCREAMING_SNAKE_CASE__ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
SCREAMING_SNAKE_CASE__ = json.loads(UpperCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
SCREAMING_SNAKE_CASE__ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
SCREAMING_SNAKE_CASE__ = json.loads(UpperCamelCase__ )
if not mpi_options.get("""sagemaker_mpi_enabled""" , UpperCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , __A , )
@cached_property
def _snake_case ( self :List[Any] ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
SCREAMING_SNAKE_CASE__ = torch.device("""cpu""" )
SCREAMING_SNAKE_CASE__ = 0
elif is_sagemaker_model_parallel_available():
SCREAMING_SNAKE_CASE__ = smp.local_rank()
SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , __A )
SCREAMING_SNAKE_CASE__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
SCREAMING_SNAKE_CASE__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
SCREAMING_SNAKE_CASE__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE__ = 1
if device.type == "cuda":
torch.cuda.set_device(__A )
return device
@property
def _snake_case ( self :Tuple ) -> int:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _snake_case ( self :int ) -> int:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
        return False
 | 6 |
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    '''simple docstring'''
    return sum(e for e in range(3 ,n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
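Sanity check on the classic small case: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23.
print(solution(10))  # 23
print(sum(e for e in range(10) if e % 3 == 0 or e % 5 == 0))  # 23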
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = TextToVideoSDPipeline
UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase : Any = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowerCAmelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_A = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_A = CLIPTextModel(_UpperCAmelCase )
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = 'cpu' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = TextToVideoSDPipeline(**_UpperCAmelCase )
_A = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = self.get_dummy_inputs(_UpperCAmelCase )
_A = 'np'
_A = sd_pipe(**_UpperCAmelCase ).frames
_A = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_A = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
_A = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_A = pipe.to('cuda' )
_A = 'Spiderman is surfing'
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=25 , output_type='pt' ).frames
_A = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCAmelCase_ ( self : Dict ):
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
_A = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_A = pipe.to('cuda' )
_A = 'Spiderman is surfing'
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='pt' ).frames
_A = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 7 |
'''simple docstring'''
import math
def prime_sieve(n: int ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit: int = 999966663333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 0 |
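The odd-only sieve above can be spot-checked on a small bound before trusting it at roughly 10**6 entries:
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]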
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''LayoutLMv2ImageProcessor'''
lowerCAmelCase = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__A : str = kwargs.pop('feature_extractor')
__A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
# first, apply the image processor
__A : Dict = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__A : Dict = features['words']
__A : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__A : Dict = features.pop('pixel_values')
if return_overflowing_tokens is True:
__A : str = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'])
__A : str = images
return encoded_inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(_UpperCAmelCase)} and {len(_UpperCAmelCase)}')
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=_snake_case , )
assert hasattr(self , 'env' )
def _a ( self : Tuple , _snake_case : int ):
"""simple docstring"""
A__ = F'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
A__ = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_snake_case , instance_count=_snake_case , instance_type=self.instance_type , debugger_hook_config=_snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_snake_case , py_version='py36' , )
def _a ( self : Dict , _snake_case : str ):
"""simple docstring"""
TrainingJobAnalytics(_snake_case ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def _a ( self : List[str] , _snake_case : int ):
"""simple docstring"""
A__ = self.create_estimator(_snake_case )
# run training
estimator.fit()
# result dataframe
A__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _snake_case )
| 9 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCAmelCase = "src/diffusers"
_lowerCAmelCase = "."
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCAmelCase = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCAmelCase = spec.loader.load_module()
def _snake_case ( __snake_case , __snake_case ):
return line.startswith(__snake_case ) or len(__snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , __snake_case ) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_lowerCAmelCase = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_lowerCAmelCase = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_lowerCAmelCase = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply `black` to some code with the repo's style settings."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCAmelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
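# Typical invocations, per the header comment of this script (run from the repo root):
#
#   python utils/check_copies.py                      # report copy inconsistencies and fail
#   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place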
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
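# Illustration with a hypothetical vocab line (not taken from the real vocab.txt): a
# comma-separated line such as "こんにちは,コンニチハ" maps every surface form on the line
# to the same id in `vocab`, while `raw_vocab` keys the whole comma-joined line and
# `ids_to_tokens` keeps the list of forms:
#
#   ids_to_tokens[idx] == ["こんにちは", "コンニチハ"]
#   vocab["こんにちは"] == vocab["コンニチハ"] == idx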
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
                 bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("โ")
elif word == "<KIGOU>":
words.append("ว")
elif word == "<U2000U2BFF>":
words.append("โ")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
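# A minimal usage sketch (assumes network access to the Hugging Face Hub; the
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("吾輩は猫である")["input_ids"]
#   text = tokenizer.decode(ids)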
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
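# A usage sketch mirroring the integration test above (assumes network access; the
# checkpoint name comes from `model_name` in TFPegasusIntegrationTests):
#
#   tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
#   summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))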
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
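# A usage sketch mirroring the integration test above (assumes network access):
#
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   with torch.no_grad():
#       last_hidden_state = model(input_ids)[0]   # shape (1, 11, 768)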
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)

    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")
    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
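# Typical invocation (assuming this script is saved as, e.g.,
# convert_swin2sr_original_to_pytorch.py; the default checkpoint URL is the one
# declared above, and the other supported URLs are the keys of `url_to_name`):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64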
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
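# Quick sanity checks (illustrative):
#
#   >>> is_prime(2), is_prime(3), is_prime(4), is_prime(29)
#   (True, True, False, True)
#
# The 6k +/- 1 stride works because every prime p > 3 satisfies p % 6 in (1, 5);
# candidates divisible by 2 or 3 are ruled out before the loop starts.
# For reference, solution() — the 10001st prime — is 104743.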
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
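# Worked check: solution(200) counts the ways to make £2 (200 pence) from any number
# of 1p, 2p, 5p, 10p, 20p, 50p, £1 and £2 coins via the chained recursion above. The
# published Project Euler problem 31 answer is 73682, so a quick self-test is:
#
#   assert solution(200) == 73682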
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
        special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
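# A minimal usage sketch following the standard transformers config pattern
# (VisualBertModel is an assumption here — it is not imported in this file):
#
#   from transformers import VisualBertConfig, VisualBertModel
#
#   configuration = VisualBertConfig(vocab_size=30522, visual_embedding_dim=512)
#   model = VisualBertModel(configuration)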
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # Grab the image file name and strip the trailing "_<index>.jpg" to get the label.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
if args.with_tracking:
_a : Tuple = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir )
else:
_a : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : List[Any] = config['''lr''']
_a : Optional[Any] = int(config['''num_epochs'''] )
_a : List[Any] = int(config['''seed'''] )
_a : Optional[Any] = int(config['''batch_size'''] )
_a : str = config['''image_size''']
if not isinstance(__a ,(list, tuple) ):
_a : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,'''isdigit''' ):
if args.checkpointing_steps == "epoch":
_a : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_a : int = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_a : Optional[int] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_a : List[str] = os.path.split(__a )[-1].split('''.''' )[0]
accelerator.init_trackers(__a ,__a )
# Grab all the image filenames
_a : Dict = [os.path.join(args.data_dir ,__a ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_a : Dict = [extract_label(__a ) for fname in file_names]
_a : str = list(set(__a ) )
id_to_label.sort()
_a : List[str] = {lbl: i for i, lbl in enumerate(__a )}
# Set the seed before splitting the data.
np.random.seed(__a )
torch.manual_seed(__a )
torch.cuda.manual_seed_all(__a )
# Split our filenames between train and validation
_a : str = np.random.permutation(len(__a ) )
_a : List[Any] = int(0.8 * len(__a ) )
_a : Dict = random_perm[:cut]
_a : Any = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_a : Optional[int] = Compose([RandomResizedCrop(__a ,scale=(0.5, 1.0) ), ToTensor()] )
_a : Any = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__a ,label_to_id=__a )
# For evaluation, we use a deterministic Resize
_a : Dict = Compose([Resize(__a ), ToTensor()] )
_a : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__a ,label_to_id=__a )
# Instantiate dataloaders.
_a : Tuple = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 )
_a : Optional[Any] = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : str = create_model('''resnet50d''' ,pretrained=__a ,num_classes=len(__a ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : int = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_a : Optional[int] = False
for param in model.get_classifier().parameters():
_a : Any = True
# We normalize the batches of images to be a bit faster.
_a : int = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_a : Dict = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_a : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
_a : Optional[int] = OneCycleLR(optimizer=__a ,max_lr=__a ,epochs=__a ,steps_per_epoch=len(__a ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Any = accelerator.prepare(
__a ,__a ,__a ,__a ,__a )
# We need to keep track of how many total steps we have iterated over
_a : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
_a : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_a : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_a : Optional[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_a : List[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_a : Optional[int] = os.path.splitext(__a )[0]
if "epoch" in training_difference:
_a : List[Any] = int(training_difference.replace('''epoch_''' ,'''''' ) ) + 1
_a : str = None
else:
_a : str = int(training_difference.replace('''step_''' ,'''''' ) )
_a : Any = resume_step // len(__a )
resume_step -= starting_epoch * len(__a )
# Now we train the model
for epoch in range(__a ,__a ):
model.train()
if args.with_tracking:
_a : List[Any] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_a : List[Any] = accelerator.skip_first_batches(__a ,__a )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_a : str = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_a : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_a : Dict = (batch['''image'''] - mean) / std
_a : Optional[int] = model(__a )
_a : Union[str, Any] = torch.nn.functional.cross_entropy(__a ,batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__a ,__a ):
_a : List[str] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_a : Dict = os.path.join(args.output_dir ,__a )
accelerator.save_state(__a )
model.eval()
_a : Union[str, Any] = 0
_a : List[str] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_a : str = {k: v.to(accelerator.device ) for k, v in batch.items()}
_a : Tuple = (batch['''image'''] - mean) / std
with torch.no_grad():
_a : int = model(__a )
_a : Tuple = outputs.argmax(dim=-1 )
_a , _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_a : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_a : Any = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(__a ),
'''epoch''': epoch,
} ,step=__a ,)
if checkpointing_steps == "epoch":
_a : str = F"""epoch_{epoch}"""
if args.output_dir is not None:
_a : Optional[int] = os.path.join(args.output_dir ,__a )
accelerator.save_state(__a )
if args.with_tracking:
accelerator.end_training()
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' ,required=__a ,help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' ,help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' ,type=__a ,default=__a ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' ,type=__a ,default=__a ,help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' ,)
parser.add_argument(
'''--output_dir''' ,type=__a ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--resume_from_checkpoint''' ,type=__a ,default=__a ,help='''If the training should continue from a checkpoint folder.''' ,)
parser.add_argument(
'''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,)
parser.add_argument(
'''--project_dir''' ,type=__a ,default='''logs''' ,help='''Location where experiment tracking logs and relevant project information are stored.''' ,)
_a : str = parser.parse_args()
_a : Optional[Any] = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(__a ,__a )
if __name__ == "__main__":
main()
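# A minimal, self-contained sketch of the save/resume pattern used above (assumes the
# `accelerate` package is installed; the tiny model, checkpoint directory and skipped
# batch count below are illustrative, not taken from the script above):
def _resume_pattern_sketch():
    import torch
    from accelerate import Accelerator
    from torch.utils.data import DataLoader, TensorDataset

    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loader = DataLoader(TensorDataset(torch.randn(16, 4)), batch_size=2)
    model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    accelerator.save_state("checkpoint_sketch")  # writes model, optimizer and RNG states
    accelerator.load_state("checkpoint_sketch")  # restores them in place
    for _batch in accelerator.skip_first_batches(loader, 3):  # resume mid-epoch
        pass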
| 14 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
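# The routine above is Dijkstra on a 0/1 grid driven by a heap; since the mangled
# signature is hard to call directly, here is a compact, self-contained restatement
# with readable names (distance only, no path reconstruction) plus a case that is
# easy to check by hand:
def _grid_dijkstra_demo(grid, source, destination):
    rows, cols = grid.shape
    dist = np.full((rows, cols), np.inf)
    dist[source] = 0
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1 and dist[nx, ny] > d + 1:
                dist[nx, ny] = d + 1
                heappush(queue, (d + 1, (nx, ny)))
    return np.inf

# Walkable cells are 1; the only route from (0, 0) to (2, 0) detours around row 1.
assert _grid_dijkstra_demo(np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]]), (0, 0), (2, 0)) == 6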
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
A : Optional[Any] = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def UpperCamelCase ( __magic_name__ : int ) -> int:
"""simple docstring"""
lowercase__ = 0
while number:
# Slightly increases speed by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains are formed:
# one ends with 89, and seeding its member 58 first means the fewest
# iterations are needed to check all of the remaining members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are the values declared at the start.
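# A worked example of the digit-square chain described above (easy to verify by
# hand): 44 -> 32 -> 13 -> 10 -> 1, while 85 -> 89 closes onto the 89 loop.
def _chain_demo(n: int) -> int:
    while n not in (1, 89):
        n = sum(int(d) ** 2 for d in str(n))
    return n

assert _chain_demo(44) == 1
assert _chain_demo(85) == 89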
# The lookup dictionary was changed to an array to speed up the solution.
A : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
A : List[str] = True
A : Union[str, Any] = False
def UpperCamelCase ( __magic_name__ : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase__ = chain(next_number(__magic_name__ ) )
lowercase__ = number_chain
while number < 1000_0000:
lowercase__ = number_chain
number *= 10
return number_chain
def UpperCamelCase ( __magic_name__ : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __magic_name__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 15 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
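# The token-embedding resize performed in the conversion above just concatenates
# copies of existing rows onto the embedding matrix; a tiny standalone illustration
# of that trick (the sizes and row indices below are arbitrary):
def _embedding_extension_sketch():
    import torch

    word_emb = torch.randn(10, 4)        # pretend vocab of 10, hidden size 4
    ent_row = word_emb[3].unsqueeze(0)   # seed the new <ent> row from an existing token
    enta_row = word_emb[5].unsqueeze(0)  # seed the new <ent2> row likewise
    extended = torch.cat([word_emb, ent_row, enta_row])
    assert extended.shape == (12, 4)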
| 685 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Optional[Any] = '▁'
__A : Any = {'vocab_file': 'sentencepiece.bpe.model'}
__A : int = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
__A : str = {
'xlm-roberta-base': 5_1_2,
'xlm-roberta-large': 5_1_2,
'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2,
'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2,
'xlm-roberta-large-finetuned-conll03-english': 5_1_2,
'xlm-roberta-large-finetuned-conll03-german': 5_1_2,
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : List[Any]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ):
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ):
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip()
return out_string
def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 16 |
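# The fairseq/spm alignment above reduces to "spm id + offset, with the four specials
# pinned"; a standalone sketch of the same lookup logic (the toy spm ids mirror the
# comment table in __init__, they are not read from a real model):
def _fairseq_align_demo(token: str) -> int:
    specials = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    toy_spm_ids = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
    offset = 1
    if token in specials:
        return specials[token]
    spm_id = toy_spm_ids.get(token, 0)
    return spm_id + offset if spm_id else 3  # unknown tokens map to the fairseq <unk> id

assert _fairseq_align_demo(",") == 4   # spm id 3 -> fairseq id 4
assert _fairseq_align_demo("<pad>") == 1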
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsรฉ."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[Any] = '''open-llama'''
def __init__( self : Union[str, Any] , __A : Dict=10_0000 , __A : Optional[int]=4096 , __A : str=1_1008 , __A : str=32 , __A : Union[str, Any]=32 , __A : str="silu" , __A : Tuple=2048 , __A : Optional[Any]=0.0_2 , __A : List[Any]=1e-6 , __A : Dict=True , __A : Optional[Any]=0 , __A : str=1 , __A : Any=2 , __A : Optional[int]=False , __A : List[Any]=True , __A : Any=0.1 , __A : int=0.1 , __A : Optional[int]=True , __A : Optional[int]=True , __A : Optional[Any]=None , **__A : str , ):
__A : List[str] = vocab_size
__A : Dict = max_position_embeddings
__A : Dict = hidden_size
__A : str = intermediate_size
__A : Tuple = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : Optional[int] = hidden_act
__A : List[Any] = initializer_range
__A : Tuple = rms_norm_eps
__A : str = use_cache
__A : Optional[int] = kwargs.pop(
"""use_memorry_efficient_attention""" , __A )
__A : Union[str, Any] = hidden_dropout_prob
__A : Optional[Any] = attention_dropout_prob
__A : str = use_stable_embedding
__A : Dict = shared_input_output_embedding
__A : List[str] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , )
def lowerCAmelCase_ ( self : List[Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
F"""got {self.rope_scaling}""" )
__A : Dict = self.rope_scaling.get("""type""" , __A )
__A : Optional[Any] = self.rope_scaling.get("""factor""" , __A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 17 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 0 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the values of both inputs must be non-negative" )
_lowerCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_lowerCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]
_lowerCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
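# Quick sanity check of the technique above (zfill both binary strings, then OR them
# column by column) against Python's built-in `|` operator, restated standalone
# because of the mangled parameter names; the operands are arbitrary:
def _or_demo(a: int, b: int) -> str:
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int("1" in pair)) for pair in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert _or_demo(25, 32) == bin(25 | 32)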
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(a_ ,lowercase__ ) )
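# A few illustrative inputs for the validator above (the numbers are synthetic):
assert __UpperCAmelCase("0094702343221")
assert __UpperCAmelCase("+94771234567")
assert not __UpperCAmelCase("0791234567")  # 9 is not a valid digit after the leading 7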
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 685 | 0 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = math.sqrt(__snake_case )
_UpperCamelCase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowerCamelCase__ ( __snake_case, __snake_case ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = np.zeros((kernel_size, kernel_size) )
for i in range(0, __snake_case ):
for j in range(0, __snake_case ):
_UpperCamelCase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__snake_case, __snake_case )
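# The spatial kernel built above is a plain Gaussian of distance from the window
# centre; a standalone sanity check of that formula for a 3x3 kernel (the sigma
# value is arbitrary):
def _gauss_kernel_demo() -> None:
    sigma = 1.0
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    dists = np.array(
        [[math.sqrt(abs(i - 1) ** 2 + abs(j - 1) ** 2) for j in range(3)] for i in range(3)]
    )
    kernel = cons * np.exp(-((dists / sigma) ** 2) * 0.5)
    assert kernel[1, 1] == kernel.max()  # the centre cell carries the largest weight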
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = np.zeros(img.shape )
_UpperCamelCase = get_gauss_kernel(__snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = img.shape
for i in range(kernel_size // 2, size_x - kernel_size // 2 ):
for j in range(kernel_size // 2, size_y - kernel_size // 2 ):
_UpperCamelCase = get_slice(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = img_s - img_s[kernel_size // 2, kernel_size // 2]
_UpperCamelCase = vec_gaussian(__snake_case, __snake_case )
_UpperCamelCase = np.multiply(__snake_case, __snake_case )
_UpperCamelCase = np.multiply(__snake_case, __snake_case )
_UpperCamelCase = np.sum(__snake_case ) / np.sum(__snake_case )
_UpperCamelCase = val
return imga
def lowerCamelCase__ ( __snake_case ) -> tuple:
"""simple docstring"""
_UpperCamelCase = args[1] if args[1:] else '''../image_data/lena.jpg'''
_UpperCamelCase = float(args[2] ) if args[2:] else 1.0
_UpperCamelCase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_UpperCamelCase = int(args[4] )
_UpperCamelCase = kernel_size + abs(kernel_size % 2 - 1 )
else:
_UpperCamelCase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_a , _a , _a , _a = parse_args(sys.argv)
_a = cva.imread(filename, 0)
cva.imshow("""input image""", img)
_a = img / 255
_a = out.astype("""float32""")
_a = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_a = out * 255
_a = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 19 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
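# The identifier regex just compiled pulls the quoted model type out of a mapping
# entry; a quick standalone check against a representative line (the sample line is
# illustrative, not read from a real file):
assert _re_identifier.search(' ("albert", "AlbertConfig"),').groups()[0] == "albert"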
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=0.6 , lowercase_=None , ) -> Dict:
a__ =parent
a__ =batch_size
a__ =image_size
a__ =patch_size
a__ =num_channels
a__ =is_training
a__ =use_labels
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =type_sequence_label_size
a__ =initializer_range
a__ =mask_ratio
a__ =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
a__ =(image_size // patch_size) ** 2
a__ =int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self) -> List[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =TFViTMAEModel(config=lowercase_)
a__ =model(lowercase_ , training=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
a__ =TFViTMAEForPreTraining(lowercase_)
a__ =model(lowercase_ , training=lowercase_)
# expected sequence length = num_patches
a__ =(self.image_size // self.patch_size) ** 2
a__ =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
a__ =1
a__ =TFViTMAEForPreTraining(lowercase_)
a__ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a__ =model(lowercase_ , training=lowercase_)
a__ =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self) -> List[str]:
a__ =self.prepare_config_and_inputs()
((a__) , (a__) , (a__)) =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
snake_case ={'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Dict:
a__ =TFViTMAEModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def __UpperCamelCase ( self) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self) -> Optional[int]:
pass
def __UpperCamelCase ( self) -> List[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
a__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Layer))
def __UpperCamelCase ( self) -> Dict:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_)
def __UpperCamelCase ( self) -> Any:
# make the mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
a__ =copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_))
a__ =model(**lowercase_ , noise=lowercase_)
a__ =outputs_dict[0].numpy()
a__ =outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1e-6)
def __UpperCamelCase ( self) -> Any:
# make the mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(lowercase_):
a__ ={}
for k, v in inputs_dict.items():
if tf.is_tensor(lowercase_):
a__ =v.numpy()
else:
a__ =np.array(lowercase_)
return inputs_np_dict
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =prepare_numpy_arrays(lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
a__ =model(**lowercase_ , noise=lowercase_)
self.assert_outputs_same(lowercase_ , lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
# make masks reproducible
np.random.seed(2)
a__ =int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
a__ =tf.constant(lowercase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a__ =tf_noise
super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
# make mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ ={
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(lowercase_)
if module_member_name.endswith('MainLayer')
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer')] == model_class.__name__[: -len('Model')]
for module_member in (getattr(lowercase_ , lowercase_),)
if isinstance(lowercase_ , lowercase_)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowercase_ , '_keras_serializable' , lowercase_)
}
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
a__ =tf.convert_to_tensor(lowercase_)
inputs_dict.update({'noise': noise})
for main_layer_class in tf_main_layer_classes:
a__ =main_layer_class(lowercase_)
a__ ={
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
a__ =tf.keras.Model(lowercase_ , outputs=main_layer(lowercase_))
a__ =model(lowercase_)
with tempfile.TemporaryDirectory() as tmpdirname:
a__ =os.path.join(lowercase_ , 'keras_model.h5')
model.save(lowercase_)
a__ =tf.keras.models.load_model(
lowercase_ , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(lowercase_ , tf.keras.Model)
a__ =model(lowercase_)
self.assert_outputs_same(lowercase_ , lowercase_)
@slow
def __UpperCamelCase ( self) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
if model_class.__name__ == "TFViTMAEModel":
a__ =outputs.last_hidden_state.numpy()
a__ =0
else:
a__ =outputs.logits.numpy()
a__ =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ , saved_model=lowercase_)
a__ =model_class.from_pretrained(lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
if model_class.__name__ == "TFViTMAEModel":
a__ =after_outputs['last_hidden_state'].numpy()
a__ =0
else:
a__ =after_outputs['logits'].numpy()
a__ =0
a__ =np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase_ , 1e-5)
def __UpperCamelCase ( self) -> Any:
# make mask reproducible
np.random.seed(2)
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =int((config.image_size // config.patch_size) ** 2)
a__ =np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(lowercase_ , noise=lowercase_)
a__ =model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowercase_)
a__ =model_class.from_config(model.get_config())
# make sure it also accepts a normal config
a__ =model_class.from_config(model.config)
a__ =new_model(lowercase_) # Build model
new_model.set_weights(model.get_weights())
a__ =new_model(lowercase_ , noise=lowercase_)
self.assert_outputs_same(lowercase_ , lowercase_)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self) -> List[str]:
pass
@slow
def __UpperCamelCase ( self) -> int:
a__ =TFViTMAEModel.from_pretrained('google/vit-base-patch16-224')
self.assertIsNotNone(lowercase_)
def _lowercase( ):
a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self) -> Dict:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self) -> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2)
a__ =TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base')
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(images=lowercase_ , return_tensors='tf')
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a__ =ViTMAEConfig()
a__ =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
a__ =np.random.uniform(size=(1, num_patches))
# forward pass
a__ =model(**lowercase_ , noise=lowercase_)
# verify the logits
a__ =tf.convert_to_tensor([1, 196, 768])
self.assertEqual(outputs.logits.shape , lowercase_)
a__ =tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowercase_ , atol=1e-4)
| 20 |
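# The ViTMAE tester above derives its expected sequence length as
# ceil((1 - mask_ratio) * (num_patches + 1)); a quick standalone check using the
# tester's defaults (image_size=30, patch_size=2, mask_ratio=0.6):
import math as _math

_num_patches = (30 // 2) ** 2
_seq_length = int(_math.ceil((1 - 0.6) * (_num_patches + 1)))
assert (_num_patches, _seq_length) == (225, 91)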
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
| 685 | 0 |
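# Minimal usage sketch for the timm backbone wrapper above (assumes the `timm`
# package is installed and a transformers version that exports TimmBackbone at the
# top level; the backbone name and input shape are illustrative):
def _timm_backbone_sketch():
    import torch
    from transformers import TimmBackbone, TimmBackboneConfig

    config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
    backbone = TimmBackbone(config)
    features = backbone(torch.randn(1, 3, 224, 224)).feature_maps  # one map per out index
    return features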
import operator
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None ):
__magic_name__ : Any =operator.lt if reverse else operator.gt
__magic_name__ : Union[str, Any] =solution or []
if not arr:
return solution
__magic_name__ : Optional[Any] =[arr.pop(0 )]
for i, item in enumerate(lowerCamelCase ):
if _operator(lowerCamelCase , sublist[-1] ):
sublist.append(lowerCamelCase )
arr.pop(lowerCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(lowerCamelCase )
else:
while sublist:
__magic_name__ : List[Any] =sublist.pop(0 )
for i, xx in enumerate(lowerCamelCase ):
if not _operator(lowerCamelCase , lowerCamelCase ):
solution.insert(lowerCamelCase , lowerCamelCase )
break
else:
solution.append(lowerCamelCase )
strand_sort(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 21 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
pass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError("Maximum queue size is 100")
self.queues[priority].append(a)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : List[Any] = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 0 |
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase):
if index == number_of_items:
return 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = knapsack(__lowercase , __lowercase , __lowercase , __lowercase , index + 1)
if weights[index] <= max_weight:
UpperCamelCase_ = values[index] + knapsack(
__lowercase , __lowercase , __lowercase , max_weight - weights[index] , index + 1)
return max(__lowercase , __lowercase)
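# Illustrative call, treating the mangled parameters as (weights, values,
# number_of_items, max_weight, index); the item data is made up. With weights
# [1, 3, 4], values [15, 20, 30] and capacity 4, the best value is 35 (items 0 and 1):
#
#     _snake_case([1, 3, 4], [15, 20, 30], 3, 4, 0) == 35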
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path: str, gpt2_config_file: str, pytorch_dump_folder_path: str) -> None:
    # Construct the model from a config file, or fall back to the default config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
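# Example invocation (the paths are placeholders, not from the original script):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch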
| 24 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self) -> None:
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 685 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'instructblip_vision_model'
def __init__( self : List[str] , a : Optional[int]=1408 , a : List[str]=6144 , a : str=39 , a : Optional[int]=16 , a : List[Any]=224 , a : Dict=14 , a : Union[str, Any]="gelu" , a : Tuple=1e-6 , a : Any=0.0 , a : str=1e-10 , a : Tuple=True , **a : Dict , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = patch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Tuple = qkv_bias
@classmethod
def __UpperCamelCase ( cls : int , a : Union[str, os.PathLike] , **a : Tuple ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
SCREAMING_SNAKE_CASE : List[str] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a , **a )
class InstructBlipQFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'instructblip_qformer'
def __init__( self : List[Any] , a : str=3_0522 , a : List[Any]=768 , a : Tuple=12 , a : Dict=12 , a : List[Any]=3072 , a : List[Any]="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.1 , a : List[Any]=512 , a : List[str]=0.02 , a : List[str]=1e-12 , a : Optional[Any]=0 , a : Union[str, Any]="absolute" , a : Any=2 , a : Union[str, Any]=1408 , **a : Tuple , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = position_embedding_type
SCREAMING_SNAKE_CASE : Dict = cross_attention_frequency
SCREAMING_SNAKE_CASE : int = encoder_hidden_size
@classmethod
def __UpperCamelCase ( cls : Dict , a : Union[str, os.PathLike] , **a : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = cls.get_config_dict(a , **a )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
SCREAMING_SNAKE_CASE : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a , **a )
class InstructBlipConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'instructblip'
    is_composition = True
def __init__( self : int , a : Optional[Any]=None , a : str=None , a : List[Any]=None , a : Optional[int]=32 , **a : List[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(**a )
if vision_config is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
SCREAMING_SNAKE_CASE : Tuple = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
SCREAMING_SNAKE_CASE : int = InstructBlipVisionConfig(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = InstructBlipQFormerConfig(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_config["model_type"] if "model_type" in text_config else "opt"
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[text_model_type](**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE : str = num_query_tokens
SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
SCREAMING_SNAKE_CASE : Tuple = 0.02
@classmethod
def __UpperCamelCase ( cls : int , a : InstructBlipVisionConfig , a : InstructBlipQFormerConfig , a : PretrainedConfig , **a : int , ) -> List[str]:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **a , )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Any = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE : Tuple = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : int = self.__class__.model_type
        return output
 | 25 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
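    # Hand check for n = 10: the square of the sum is 55**2 = 3025 and the
    # sum of the squares is 385, so the difference is 2640.
    assert solution(10) == 2640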
| 685 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self) -> None:
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
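# Note on the expected tokens: "@@" is the continuation marker CTRL's BPE uses
# for non-final subword pieces, so the out-of-vocabulary word "react" surfaces
# as the learned pieces "re@@ a@@ c@@ t" rather than as a single token.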
| 26 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
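    # PNDM runs two phases: Runge-Kutta ("prk") warm-up steps followed by
    # linear multistep ("plms") steps, which is why full_loop walks two
    # separate timestep lists before returning the final sample.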
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ['linear', 'squaredcos_cap_v2']:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    '''simple docstring'''
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
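# Usage sketch (illustrative shapes under the default config, not from the
# original file): a 3-channel image batch round-trips encode -> quantize ->
# decode at the input resolution.
#     model = VQModel()
#     out = model(torch.randn(1, 3, 32, 32)).sample  # shape (1, 3, 32, 32)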
| 27 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def tearDown(self) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_canny(self) -> None:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self) -> None:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path: str, n_shave_prefix_segments: int = 1) -> str:
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list: list, n_shave_prefix_segments: int = 0) -> list:
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list: list, n_shave_prefix_segments: int = 0) -> list:
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
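# Worked example (hypothetical key): renew_resnet_paths(["in_layers.0.weight"])
# returns [{"old": "in_layers.0.weight", "new": "norm1.weight"}]; these
# {old, new} pairs are what assign_to_checkpoint below uses to copy tensors
# under the diffusers-style names.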
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
SCREAMING_SNAKE_CASE : Optional[int] = old_checkpoint[path]
SCREAMING_SNAKE_CASE : str = old_tensor.shape[0] // 3
SCREAMING_SNAKE_CASE : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
SCREAMING_SNAKE_CASE : Any = old_tensor.shape[0] // config['num_head_channels'] // 3
SCREAMING_SNAKE_CASE : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = old_tensor.split(channels // num_heads ,dim=1 )
SCREAMING_SNAKE_CASE : Tuple = query.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = key.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = value.reshape(__UpperCamelCase )
for path in paths:
SCREAMING_SNAKE_CASE : List[str] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
SCREAMING_SNAKE_CASE : str = new_path.replace('middle_block.0' ,'mid_block.resnets.0' )
SCREAMING_SNAKE_CASE : int = new_path.replace('middle_block.1' ,'mid_block.attentions.0' )
SCREAMING_SNAKE_CASE : Any = new_path.replace('middle_block.2' ,'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_path.replace(replacement['old'] ,replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = old_checkpoint[path['old']][:, :, 0]
else:
SCREAMING_SNAKE_CASE : Tuple = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : int = checkpoint['time_embed.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['time_embed.0.bias']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.weight']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.bias']
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['input_blocks.0.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['input_blocks.0.0.bias']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.0.weight']
SCREAMING_SNAKE_CASE : str = checkpoint['out.0.bias']
SCREAMING_SNAKE_CASE : str = checkpoint['out.2.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
SCREAMING_SNAKE_CASE : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
SCREAMING_SNAKE_CASE : Any = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
SCREAMING_SNAKE_CASE : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
SCREAMING_SNAKE_CASE : str = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
SCREAMING_SNAKE_CASE : Any = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
SCREAMING_SNAKE_CASE : int = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = (i - 1) // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : List[str] = (i - 1) % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : Any = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[int] = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
SCREAMING_SNAKE_CASE : Dict = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
SCREAMING_SNAKE_CASE : int = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
SCREAMING_SNAKE_CASE : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path, resnet_op] ,config=__UpperCamelCase )
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = {
'old': f"input_blocks.{i}.1",
'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : str = {
f"input_blocks.{i}.1.qkv.bias": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase ,)
SCREAMING_SNAKE_CASE : List[str] = middle_blocks[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = middle_blocks[1]
SCREAMING_SNAKE_CASE : List[Any] = middle_blocks[2]
SCREAMING_SNAKE_CASE : Any = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = i // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : str = i % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : Dict = [shave_segments(__UpperCamelCase ,2 ) for name in output_blocks[i]]
SCREAMING_SNAKE_CASE : str = {}
for layer in output_block_layers:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = layer.split('.' )[0], shave_segments(__UpperCamelCase ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = [layer_name]
if len(__UpperCamelCase ) > 1:
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
SCREAMING_SNAKE_CASE : Dict = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
SCREAMING_SNAKE_CASE : Optional[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
SCREAMING_SNAKE_CASE : List[str] = []
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'old': f"output_blocks.{i}.1",
'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : int = {
f"output_blocks.{i}.1.qkv.bias": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None ,config=__UpperCamelCase ,)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
SCREAMING_SNAKE_CASE : Dict = '.'.join(['output_blocks', str(__UpperCamelCase ), path['old']] )
SCREAMING_SNAKE_CASE : Optional[int] = '.'.join(['up_blocks', str(__UpperCamelCase ), 'resnets', str(__UpperCamelCase ), path['new']] )
SCREAMING_SNAKE_CASE : int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 28 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Returns the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'{solution() = }')
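    # Quick hand check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
    # which sum to 23.
    assert solution(10) == 23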
| 685 | 0 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
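# The point() mapping above is linear: a pixel value c becomes c + level
# (PIL clamps the result to the valid 0-255 range), so level = 100 sends a
# pixel of 50 to 150.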
| 29 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """Returns a list of all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
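# Sanity check (hand-verifiable): the primes below 20.
#     prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]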
def solution(limit: int = 999_966_663_333) -> int:
    """Returns the sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
 | 30 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal (sin/cos) positional embeddings for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
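# Shape sketch (illustrative): embedding eight timesteps into a 32-dim table
# yields an (8, 32) array whose first half of columns are sin features and the
# second half cos features (or the reverse when flip_sin_to_cos is set).
#     emb = get_sinusoidal_embeddings(jnp.arange(8), embedding_dim=32)
#     emb.shape == (8, 32)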
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
 | 31 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
import base64


def base64_encode(string: str) -> bytes:
    """Encodes the given string to bytes using base64."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    """Decodes the given bytes to a string using base64."""
    return base64.b64decode(encoded_bytes).decode("utf-8")
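# Round-trip property: base64_decode(base64_encode(s)) == s for any unicode
# string, since base64 is lossless over the UTF-8 byte encoding.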
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-ๅนด])*(0?[1-9]|1[0-2])[/\-ๆ]((0?[1-9]|[12][0-9]|3[01])ๆฅ?)*(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"(ๆๆฒป|ๅคงๆญฃ|ๆญๅ|ๅนณๆ|ไปคๅ|ใพ|ใฝ|ใผ|ใป|\u32ff)\d{1,2}ๅนด(0?[1-9]|1[0-2])ๆ(0?[1-9]|[12][0-9]|3[01])ๆฅ(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ไธ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(ๅๅ|ไธๅ|ๅไธๅ|ๅ|ๅใใซ|ไธใใซ|ๅไธใใซ|ใใซ|ๅใฆใผใญ|ไธใฆใผใญ|ๅไธใฆใผใญ|ใฆใผใญ)+(\(็จ่พผ\)|\(็จๆ\)|\+tax)*")
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟ"
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโ"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace("ใ" , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("โ" , "ใผ")
a_ = text.replace("โ" , "ใผ")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("โ")
elif word == "<KIGOU>":
words.append("ว")
elif word == "<U2000U2BFF>":
words.append("โ")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
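# --- Editor's note: illustrative sketch, not part of the original checkpoint code. The tokenizer
# above falls back to "<|byte%d|>" tokens for out-of-vocabulary characters and later rebuilds the
# text with bytearray(...).decode("utf-8", errors="replace"). A minimal round-trip of that idea:
def demo_byte_fallback(text: str) -> str:
    byte_tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
    recovered = bytearray(int(t[len("<|byte") : -len("|>")]) for t in byte_tokens)
    return recovered.decode("utf-8", errors="replace")

assert demo_byte_fallback("日本語テキスト") == "日本語テキスト"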
| 685 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Any , *_a:List[Any] , **_a:Any ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
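# --- Editor's note: generic sketch with hypothetical names. The snippet above is the standard
# deprecation shim: an empty subclass whose __init__ emits a FutureWarning and then defers
# entirely to the renamed class, so old imports keep working for one release cycle.
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated. Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)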
| 33 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
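# --- Editor's note: illustrative sketch of what the `ids_tensor` / `random_attention_mask` test
# helpers used above produce: random token ids plus a 0/1 mask; forcing one attended position per
# row (here the last one) is an assumption about the helper's convention, not taken verbatim.
import torch

random_ids = torch.randint(0, 99, (13, 7), dtype=torch.long)  # (batch_size, seq_length)
random_mask = (torch.rand(13, 7) < 0.5).long()
random_mask[:, -1] = 1  # guarantee every row attends to at least one token
print(random_ids.shape, random_mask.shape)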
| 685 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = 11
UpperCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(_lowercase ,_lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowercase ,_lowercase ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
UpperCamelCase = 10
return solutions
def __snake_case ( _lowercase = 2 ):
"""simple docstring"""
UpperCamelCase = 1.0
for fraction in fraction_list(_lowercase ):
UpperCamelCase = Fraction(_lowercase )
result *= frac.denominator / frac.numerator
return int(_lowercase )
if __name__ == "__main__":
print(solution())
| 34 |
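# --- Editor's note: standalone worked example (it does not call the name-mangled functions
# above). Project Euler 33 asks for the "digit-cancelling" fractions such as 49/98, where
# crossing out the shared digit (49/98 -> 4/8) happens to preserve the value:
from fractions import Fraction

cancelling = [
    (num, den)
    for num in range(10, 100)
    for den in range(num + 1, 100)
    if num % 10 == den // 10 and den % 10 != 0
    and Fraction(num, den) == Fraction(num // 10, den % 10)
]
assert cancelling == [(16, 64), (19, 95), (26, 65), (49, 98)]
# Their product is 1/100, so the answer (the denominator in lowest terms) is 100.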
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
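# --- Editor's note: standalone cross-check (the two functions above share one mangled name, so
# the 6k +/- 1 test is restated here). Every prime > 3 is congruent to 1 or 5 mod 6, which is why
# trial division only needs candidates of the form 6k - 1 and 6k + 1:
import math

def sixk_is_prime(n: int) -> bool:
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

assert all(sixk_is_prime(n) == (n > 1 and all(n % d for d in range(2, n))) for n in range(600))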
| 685 | 0 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=_UpperCAmelCase ):
lowerCamelCase : Dict = ['''speech''']
def __init__( self : Any , *_lowercase : Tuple , **_lowercase : Optional[Any] ):
requires_backends(self , ['''speech'''] )
class lowercase ( metaclass=_UpperCAmelCase ):
lowerCamelCase : str = ['''speech''']
def __init__( self : List[str] , *_lowercase : Optional[int] , **_lowercase : str ):
requires_backends(self , ['''speech'''] )
| 35 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
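# --- Editor's note: hedged usage sketch. This config mirrors `transformers.VisualBertConfig`;
# `visual_embedding_dim` is the size of the visual (region) features embedded alongside the token
# embeddings. A randomly initialized model is enough for shape experiments:
from transformers import VisualBertConfig, VisualBertModel

visual_config = VisualBertConfig(visual_embedding_dim=512)
visual_model = VisualBertModel(visual_config)
print(visual_config.hidden_size, visual_config.visual_embedding_dim)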
| 685 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowercase : Any = logging.get_logger(__name__)
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
if not conversation_id:
snake_case : str = uuid.uuid4()
if past_user_inputs is None:
snake_case : Any = []
if generated_responses is None:
snake_case : Dict = []
snake_case : uuid.UUID = conversation_id
snake_case : List[str] = past_user_inputs
snake_case : List[str] = generated_responses
snake_case : Optional[str] = text
def __eq__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
snake_case : Optional[Any] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
snake_case : Any = text
def snake_case_ ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case : Tuple = None
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self.generated_responses.append(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
snake_case : List[str] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
snake_case : List[Any] = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
snake_case , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if self.tokenizer.pad_token_id is None:
snake_case : int = self.tokenizer.eos_token
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = {}
snake_case : str = {}
snake_case : List[Any] = {}
if min_length_for_response is not None:
snake_case : Optional[int] = min_length_for_response
if minimum_tokens is not None:
snake_case : Dict = minimum_tokens
if "max_length" in generate_kwargs:
snake_case : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0 ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ ,num_workers=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) == 1:
return outputs[0]
return outputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=32 ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
snake_case : Optional[Any] = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case : List[str] = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE_ )
if self.framework == "pt":
snake_case : Dict = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case : Dict = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=10 ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
snake_case : Any = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
snake_case : List[str] = max_length - minimum_tokens
snake_case : List[str] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
snake_case : Dict = model_inputs["""attention_mask"""][:, -trim:]
snake_case : Tuple = model_inputs.pop("""conversation""" )
snake_case : Tuple = max_length
snake_case : List[Any] = self.model.generate(**SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
if self.model.config.is_encoder_decoder:
snake_case : Tuple = 1
else:
snake_case : List[str] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=True ):
'''simple docstring'''
snake_case : Optional[int] = model_outputs["""output_ids"""]
snake_case : List[str] = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE_ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ,)
snake_case : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(SCREAMING_SNAKE_CASE_ )
return conversation
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = self.tokenizer.eos_token_id
snake_case : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > self.tokenizer.model_max_length:
snake_case : Tuple = input_ids[-self.tokenizer.model_max_length :]
return input_ids
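# --- Editor's note: hedged usage sketch of the pipeline defined above, via the public
# `transformers` API (the DialoGPT checkpoint is an example choice, not mandated by the source):
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Hi, can you recommend a good book?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])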
| 36 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
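# --- Editor's note: compact reference sketch. As printed in this dump, the function above never
# writes back into `matrix`/`predecessors` (those assignments were reduced to plain `a_ = ...`
# rebindings), so the same grid-Dijkstra idea is restated here in runnable form. Cells equal to 1
# are walkable and every move costs 1:
from heapq import heappop, heappush
import numpy as np

def grid_dijkstra(grid, source, destination):
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < grid.shape[0] and 0 <= ny < grid.shape[1] and grid[nx, ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[(nx, ny)] = d + 1
                    heappush(queue, (d + 1, (nx, ny)))
    return float("inf")

demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
assert grid_dijkstra(demo_grid, (0, 0), (2, 0)) == 6  # forced around the blocked middle row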
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : int = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gptsan-japanese'
_lowercase = [
'past_key_values',
]
_lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : str , lowerCamelCase__ : Tuple=36_000 , lowerCamelCase__ : Optional[int]=1_280 , lowerCamelCase__ : Any=1_024 , lowerCamelCase__ : Dict=8_192 , lowerCamelCase__ : Any=4_096 , lowerCamelCase__ : Optional[int]=128 , lowerCamelCase__ : Optional[int]=10 , lowerCamelCase__ : List[str]=0 , lowerCamelCase__ : int=16 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : List[Any]=128 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : int=1E-5 , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Any="float32" , lowerCamelCase__ : str=False , lowerCamelCase__ : Any=False , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : str=0.002 , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Union[str, Any]=35_998 , lowerCamelCase__ : List[str]=35_995 , lowerCamelCase__ : int=35_999 , **lowerCamelCase__ : int , ):
a__ : Optional[Any] = vocab_size
a__ : Tuple = max_position_embeddings
a__ : str = d_model
a__ : Tuple = d_ff
a__ : Tuple = d_ext
a__ : List[Any] = d_spout
a__ : str = num_switch_layers
a__ : List[Any] = num_ext_layers
a__ : List[Any] = num_switch_layers + num_ext_layers
a__ : Dict = num_heads
a__ : List[str] = num_experts
a__ : Optional[Any] = expert_capacity
a__ : str = dropout_rate
a__ : int = layer_norm_epsilon
a__ : Any = router_bias
a__ : Dict = router_jitter_noise
a__ : Any = router_dtype
a__ : Optional[Any] = router_ignore_padding_tokens
a__ : Optional[Any] = output_hidden_states
a__ : List[Any] = output_attentions
a__ : Any = initializer_factor
a__ : List[Any] = output_router_logits
a__ : Dict = use_cache
super().__init__(
separator_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
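# --- Editor's note: hedged sketch of the `attribute_map` declared above (class name as in
# `transformers.GPTSanJapaneseConfig`, which this snippet mirrors). The map lets generic code read
# `hidden_size` / `num_hidden_layers` and be routed to the model-specific `d_model` / `num_layers`:
from transformers import GPTSanJapaneseConfig

gptsan_config = GPTSanJapaneseConfig()
assert gptsan_config.hidden_size == gptsan_config.d_model
assert gptsan_config.num_hidden_layers == gptsan_config.num_switch_layers + gptsan_config.num_ext_layers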
| 37 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
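# --- Editor's note: hypothetical invocation. The script filename and local paths below are
# placeholders (only the argparse flags come from the source):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted-mluke-base \
#       --model_size base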
| 685 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Dict = 1_3
snake_case__ : Any = 7
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = True
snake_case__ : Optional[int] = True
snake_case__ : str = True
snake_case__ : Optional[Any] = 9_9
snake_case__ : Dict = 3_8_4
snake_case__ : Optional[Any] = 2
snake_case__ : List[str] = 4
snake_case__ : Union[str, Any] = 3_7
snake_case__ : Optional[Any] = """gelu"""
snake_case__ : Optional[Any] = 0.1
snake_case__ : Optional[Any] = 0.1
snake_case__ : List[Any] = 5_1_2
snake_case__ : Tuple = 1_6
snake_case__ : Tuple = 2
snake_case__ : List[str] = 0.02
snake_case__ : Tuple = 3
snake_case__ : List[str] = 4
snake_case__ : Optional[Any] = 1_2_8
snake_case__ : str = 2
snake_case__ : Union[str, Any] = 9
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = None
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[str] = None
snake_case__ : Dict = None
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFConvBertModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ : int = [input_ids, input_mask]
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
snake_case__ : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = self.num_labels
snake_case__ : int = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = self.num_choices
snake_case__ : List[Any] = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[Any] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[Any] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = self.num_labels
snake_case__ : List[Any] = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Union[str, Any] = config_and_inputs
snake_case__ : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Dict = TFConvBertModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
snake_case__ : List[Any] = True
if hasattr(__SCREAMING_SNAKE_CASE , """use_cache""" ):
snake_case__ : Optional[Any] = True
snake_case__ : str = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Dict = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Tuple = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = len(model(__SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , """saved_model""" , """1""" )
snake_case__ : List[str] = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
snake_case__ : Optional[int] = outputs["""encoder_hidden_states"""]
snake_case__ : str = outputs["""encoder_attentions"""]
else:
snake_case__ : List[str] = outputs["""hidden_states"""]
snake_case__ : Tuple = outputs["""attentions"""]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Tuple = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : Dict = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : List[str] = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : int = outputs.decoder_attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = True
snake_case__ : Dict = False
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : str = True
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
snake_case__ : List[Any] = True
snake_case__ : Any = True
snake_case__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
snake_case__ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : int = model(__SCREAMING_SNAKE_CASE )[0]
snake_case__ : Any = [1, 6, 7_6_8]
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
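# --- Editor's note: hedged usage sketch mirroring the integration test above, via the public
# `transformers` API and the same "YituTech/conv-bert-base" checkpoint:
from transformers import AutoTokenizer, TFConvBertModel

convbert_tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
convbert_model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
encoded = convbert_tokenizer("ConvBERT mixes span-based convolution with self-attention.", return_tensors="tf")
print(convbert_model(encoded).last_hidden_state.shape)  # (1, seq_len, 768)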
| 38 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
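# Hedged usage sketch for the tokenizer under test. The checkpoint id is the
# public LXMERT checkpoint and is an assumption here, not used by the tests:
#
#     from transformers import LxmertTokenizer
#
#     tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
#     tokenizer.tokenize("UNwant\u00E9d,running")  # lowercased WordPiece tokens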
| 685 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Element-wise logistic function, mapping inputs into (0, 1).
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    # Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
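    # Minimal sanity check of the helpers above (illustrative values for the
    # sigmoid-based GELU approximation x * sigmoid(1.702 * x)):
    sample = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(sample))                     # ~[0.269 0.5   0.731]
    print(gaussian_error_linear_unit(sample))  # ~[-0.154  0.     0.846]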
| 39 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
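# With the _LazyModule registration above, e.g. `from transformers.models.cpmant
# import CpmAntConfig` resolves configuration_cpmant only on first attribute
# access; nothing listed in _import_structure is imported eagerly.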
| 685 | 0 |
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
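# Example (hypothetical input): a results.json containing
#     {"benchmarks/bench_array.py": {"read": {"new": 0.25, "old": 0.30, "diff": -0.05}}}
# produces a collapsible Markdown block like
#     ### Benchmark: bench_array.py
#     | metric | read |
#     |--------|---|
#     | new / old (diff) | 0.250000 / 0.300000 (-0.050000) |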
| 40 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` matches the Sri Lankan mobile number format."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(pattern.search(phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
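    # A few more illustrative checks against the pattern above:
    for candidate in ("0712345678", "+94771234567", "0912345678"):
        print(candidate, is_sri_lankan_phone_number(candidate))
    # -> True, True, False (the last one fails: the digit right after the
    #    country/leading prefix must be 7, followed by 0/1/2/4-8)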
| 685 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = UnCLIPImageVariationPipeline
SCREAMING_SNAKE_CASE : str = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
SCREAMING_SNAKE_CASE : List[str] = IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
SCREAMING_SNAKE_CASE : int = False
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
torch.manual_seed(0 )
__lowercase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=3_2 ,intermediate_size=3_7 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
__lowercase = UnCLIPTextProjModel(**lowercase__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
torch.manual_seed(0 )
__lowercase = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
__lowercase = UNetaDConditionModel(**lowercase__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
torch.manual_seed(0 )
__lowercase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__lowercase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.dummy_decoder
__lowercase = self.dummy_text_proj
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_super_res_first
__lowercase = self.dummy_super_res_last
__lowercase = UnCLIPScheduler(
variance_type='''learned_range''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
__lowercase = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
__lowercase = CLIPImageProcessor(crop_size=3_2 ,size=3_2 )
__lowercase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : List[Any]=0 ,lowercase__ : List[Any]=True ):
__lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(lowercase__ ) ).to(lowercase__ )
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
if pil_image:
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 ,1 )
__lowercase = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowercase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(**lowercase__ )
__lowercase = output.images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(
**lowercase__ ,return_dict=lowercase__ ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(**lowercase__ )
__lowercase = output.images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(
**lowercase__ ,return_dict=lowercase__ ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
__lowercase = pipe(**lowercase__ )
__lowercase = output.images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
__lowercase = pipe(
**lowercase__ ,return_dict=lowercase__ ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
__lowercase = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = torch.device('''cpu''' )
        class DummyScheduler:
            init_noise_sigma = 1
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(0 )
__lowercase = pipe.decoder.dtype
__lowercase = 1
__lowercase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__lowercase = pipe.prepare_latents(
lowercase__ ,dtype=lowercase__ ,device=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,scheduler=DummyScheduler() )
__lowercase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__lowercase = pipe.prepare_latents(
lowercase__ ,dtype=lowercase__ ,device=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,scheduler=DummyScheduler() )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(
**lowercase__ ,decoder_latents=lowercase__ ,super_res_latents=lowercase__ ).images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
# Don't pass image, instead pass embedding
__lowercase = pipeline_inputs.pop('''image''' )
__lowercase = pipe.image_encoder(lowercase__ ).image_embeds
__lowercase = pipe(
**lowercase__ ,decoder_latents=lowercase__ ,super_res_latents=lowercase__ ,image_embeddings=lowercase__ ,).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__lowercase = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase__ ,expected_max_diff=lowercase__ )
@skip_mps
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = torch_device == '''cpu'''
__lowercase = True
__lowercase = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase__ ,relax_max_difference=lowercase__ ,additional_params_copy_to_batched_inputs=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowercase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase__ ,additional_params_copy_to_batched_inputs=lowercase__ ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase__ )
@skip_mps
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE ( self : str ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
__lowercase = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' ,torch_dtype=torch.floataa )
__lowercase = pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipeline(
lowercase__ ,generator=lowercase__ ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(lowercase__ ,lowercase__ ,1_5 )
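# Hedged usage sketch of the pipeline exercised above (mirrors the slow test;
# the checkpoint id comes from that test, everything else is illustrative):
#
#     import torch
#     from diffusers import UnCLIPImageVariationPipeline
#     from diffusers.utils import load_image
#
#     pipe = UnCLIPImageVariationPipeline.from_pretrained(
#         "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
#     ).to("cuda")
#     image = load_image(
#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
#     )
#     variation = pipe(image, generator=torch.Generator("cpu").manual_seed(0), output_type="np").images[0]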
| 41 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
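# Example of the effect: inside an OrderedDict mapping, one-line entries such as
#     ("albert", "AlbertConfig"),
#     ("bert", "BertConfig"),
# and multi-line "(...)" blocks are each sorted by the first quoted identifier,
# which is exactly what _re_identifier extracts from a block.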
| 685 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_encoder_blocks' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE_=[1, 4, 8, 16] , SCREAMING_SNAKE_CASE_=[1, 2, 4, 8] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ) -> str:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_encoder_blocks
lowerCamelCase_ = sr_ratios
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = downsampling_rates
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = SegformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = SegformerForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = SegformerModelTester(self )
lowerCamelCase_ = SegformerConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('SegFormer does not use inputs_embeds' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
lowerCamelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase_ = (self.model_tester.image_size // 32) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCamelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = SegformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Optional[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-1 ) )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits.detach().cpu()
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(500, 300)] )
lowerCamelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
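# Hedged inference sketch matching the integration tests above (the checkpoint
# id is taken from those tests; `image` is any RGB PIL image):
#
#     from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
#
#     processor = SegformerImageProcessor()
#     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, num_labels, H/4, W/4)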
| 42 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
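# Hedged usage sketch (not part of this file): building the backbone from a
# config. "resnet18" is an assumed timm model name; any entry of
# timm.list_models() works.
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(-1,))
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps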
| 685 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
}
| 43 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priorities: 0 is served first, then 1, then 2."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Serve the highest priority first, FIFO within each priority.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue in which the smallest element always has the highest priority."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
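# Expected behaviour of the demos above: FixedPriorityQueue drains priority 0
# first (FIFO within a priority), so the first five dequeues yield 10, 100,
# 128, then 70 and 7 from priority 1; ElementPriorityQueue always removes the
# current minimum, so its first dequeues yield 1, 4, 5, 7, 10.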
| 685 | 0 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check whether `color` can be used given the colors of the neighbours."""
    # The color is valid if no adjacent vertex already uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and every vertex after it."""
    # Base Case: every vertex has been colored.
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
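if __name__ == "__main__":
    # Minimal demo (not part of the original module): a 4-vertex graph given
    # as an adjacency matrix, colored with at most 3 colors.
    demo_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 1],
        [0, 1, 0, 1],
        [1, 1, 1, 0],
    ]
    print(color(demo_graph, 3))  # e.g. [0, 1, 0, 2]; adjacent vertices differ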
| 44 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                """ process.""" )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
return AudioPipelineOutput(audios=lowerCamelCase__ ) | 45 |
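# Usage sketch (hedged): "harmonai/maestro-150k" is a public Dance Diffusion
# checkpoint on the Hub; any compatible 1D-UNet checkpoint works the same way.
#
#     from diffusers import DanceDiffusionPipeline
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#     waveform = output.audios[0]  # numpy array of shape (channels, samples)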
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
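# The shim above only subclasses DonutImageProcessor and warns. A hedged check
# that constructing it emits the FutureWarning (requires transformers with the
# vision extras installed):
#
#     import warnings
#     from transformers import DonutFeatureExtractor
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         DonutFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)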
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = ""
for i in table:
res += inp[i - 1]
return res
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
return data[1:] + data[0]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ""
for i in range(len(_lowerCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = int("0b" + data[0] + data[-1] , 2 )
_lowerCamelCase : int = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[Any] = message[:4]
_lowerCamelCase : str = message[4:]
_lowerCamelCase : Any = apply_table(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = xor(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[Any] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741
_lowerCamelCase : Any = apply_sbox(_lowerCamelCase , temp[4:] )
_lowerCamelCase : Optional[Any] = "0" * (2 - len(_lowerCamelCase )) + l # noqa: E741
_lowerCamelCase : str = "0" * (2 - len(_lowerCamelCase )) + r
_lowerCamelCase : str = apply_table(l + r , _lowerCamelCase )
_lowerCamelCase : Optional[Any] = xor(_lowerCamelCase , _lowerCamelCase )
return temp + right
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = input('''Enter 10 bit key: ''')
_lowerCAmelCase : Tuple = input('''Enter 8 bit message: ''')
_lowerCAmelCase : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_lowerCAmelCase : Optional[int] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowerCAmelCase : Tuple = [2, 4, 3, 1]
_lowerCAmelCase : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
_lowerCAmelCase : int = [4, 1, 3, 5, 7, 2, 8, 6]
_lowerCAmelCase : int = [4, 1, 2, 3, 2, 3, 4, 1]
_lowerCAmelCase : str = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowerCAmelCase : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowerCAmelCase : int = apply_table(key, paa_table)
_lowerCAmelCase : int = temp[:5]
_lowerCAmelCase : Dict = temp[5:]
_lowerCAmelCase : int = left_shift(left)
_lowerCAmelCase : List[Any] = left_shift(right)
_lowerCAmelCase : List[Any] = apply_table(left + right, pa_table)
_lowerCAmelCase : Tuple = left_shift(left)
_lowerCAmelCase : Union[str, Any] = left_shift(right)
_lowerCAmelCase : Any = left_shift(left)
_lowerCAmelCase : List[Any] = left_shift(right)
_lowerCAmelCase : Optional[Any] = apply_table(left + right, pa_table)
# encryption
_lowerCAmelCase : Any = apply_table(message, IP)
_lowerCAmelCase : Optional[int] = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : Optional[int] = temp[4:] + temp[:4]
_lowerCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : str = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_lowerCAmelCase : Tuple = apply_table(CT, IP)
_lowerCAmelCase : str = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : Any = temp[4:] + temp[:4]
_lowerCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : int = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT) | 46 |
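# Deterministic round-trip check for the cipher above: a fixed 10-bit key and
# 8-bit block (both illustrative) replace input(), so it can run unattended.
def _sdes_roundtrip_demo():
    global p4_table  # `function` reads this module-level table
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    ip = [2, 6, 3, 1, 4, 8, 5, 7]
    ip_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    key, message = "1010000010", "11010111"
    temp = apply_table(key, p10_table)
    left, right = left_shift(temp[:5]), left_shift(temp[5:])
    key1 = apply_table(left + right, p8_table)
    for _ in range(2):
        left, right = left_shift(left), left_shift(right)
    key2 = apply_table(left + right, p8_table)

    ct = apply_table(message, ip)
    ct = function(expansion, s0, s1, key1, ct)
    ct = function(expansion, s0, s1, key2, ct[4:] + ct[:4])
    ct = apply_table(ct, ip_inv)

    pt = apply_table(ct, ip)
    pt = function(expansion, s0, s1, key2, pt)
    pt = function(expansion, s0, s1, key1, pt[4:] + pt[:4])
    pt = apply_table(pt, ip_inv)
    assert pt == message  # each Feistel round is an involution, so we recover the plaintext


if __name__ == "__main__":
    _sdes_roundtrip_demo()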
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests of every file in `directory` that matches the given identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
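# A self-contained sketch of the machinery the analyzer above drives:
# DocTestFinder/DocTestRunner are what doctest.DocTestSuite wraps internally.
import doctest


def add(a: int, b: int) -> int:
    """
    >>> add(2, 3)
    5
    """
    return a + b


finder = doctest.DocTestFinder()
runner = doctest.DocTestRunner()
for test in finder.find(add, globs={"add": add}):
    runner.run(test)
assert runner.failures == 0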
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Save the single best checkpoint according to the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
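# Quick check of count_trainable_parameters on a toy module (torch only; the
# Linear layer is illustrative):
#
#     import torch
#     toy = torch.nn.Linear(10, 2)   # 10 * 2 weights + 2 biases = 22 parameters
#     assert count_trainable_parameters(toy) == 22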
"""Project Euler problem 6: difference between the square of the sum and the sum of the squares of 1..n."""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2) using the closed-form identities."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
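# Worked check for n = 10 (the example from the Project Euler 6 statement):
# sum of squares = 385, square of the sum = 55**2 = 3025, difference = 2640.
assert solution(10) == 2640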
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G); raise on anything that is not a DNA base."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
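# Example complements, following directly from the A<->T / C<->G pairing:
assert dna("GCTA") == "CGAT"
assert dna("ATGC") == "TACG"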
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
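# Standalone sketch of driving PNDMScheduler outside the test harness: a zero
# tensor stands in for the UNet's noise prediction, and the (1, 3, 8, 8) sample
# shape is illustrative. scheduler.step dispatches to step_prk/step_plms itself.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for unet(sample, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample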
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
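# The _LazyModule above defers the heavy torch import until an attribute is
# actually used. The same idea in miniature, via PEP 562 module-level
# __getattr__ (a synthetic sketch, not the transformers implementation):
import importlib

_LAZY_ATTRS = {"lazy_math": "math"}  # exported name -> backing module (illustrative)


def __getattr__(name):
    if name in _LAZY_ATTRS:
        return importlib.import_module(_LAZY_ATTRS[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")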
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
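# What the replicate/shard calls in the tests above do, in miniature: one RNG per
# device, and the batch reshaped so axis 0 becomes the device axis (shapes are
# illustrative).
import jax
import jax.numpy as jnp

num_devices = jax.device_count()
rng = jax.random.PRNGKey(0)
per_device_rngs = jax.random.split(rng, num_devices)  # one key per device

batch = jnp.zeros((num_devices * 2, 77))  # e.g. tokenized prompt ids
sharded = batch.reshape((num_devices, -1) + batch.shape[1:])
assert sharded.shape == (num_devices, 2, 77)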
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'vision-encoder-decoder'
_UpperCamelCase = True
def __init__( self ,**_lowerCAmelCase ):
super().__init__(**_lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuraton of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowerCamelCase__ = kwargs.pop("""encoder""" )
lowerCamelCase__ = encoder_config.pop("""model_type""" )
lowerCamelCase__ = kwargs.pop("""decoder""" )
lowerCamelCase__ = decoder_config.pop("""model_type""" )
lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = True
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
lowerCamelCase__ = True
lowerCamelCase__ = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.encoder.to_dict()
lowerCamelCase__ = self.decoder.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self ):
return 1E-4
@property
def UpperCamelCase_ ( self ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class UpperCamelCase__ (a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict()
lowerCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowerCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowerCamelCase__ = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
import torch
lowerCamelCase__ = OrderedDict()
lowerCamelCase__ = super().generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = dummy_input["""input_ids"""].shape
lowerCamelCase__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowerCamelCase__ = dummy_input.pop("""input_ids""" )
lowerCamelCase__ = dummy_input.pop("""attention_mask""" )
lowerCamelCase__ = torch.zeros(_lowerCAmelCase )
return common_inputs
class UpperCamelCase__ (a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = "default" ):
lowerCamelCase__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCAmelCase ,_lowerCAmelCase )
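# Composing a config from separate encoder/decoder configs — the public
# transformers API implemented by the classmethod above (the ViT/BERT pairing
# is illustrative):
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
    encoder_config=ViTConfig(), decoder_config=BertConfig()
)
assert config.decoder.is_decoder and config.decoder.add_cross_attention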
"""Project Euler problem 1: sum of all multiples of 3 or 5 below n."""


def solution(n: int = 1000) -> int:
    """Return the sum of every natural number below `n` that is a multiple of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
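# Worked check below 10: the multiples of 3 or 5 are 3, 5, 6 and 9, summing to 23.
assert solution(10) == 23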
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
UpperCAmelCase = model.config
UpperCAmelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
UpperCAmelCase = MBartConfig(
is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=SCREAMING_SNAKE_CASE_ , add_final_layer_norm=SCREAMING_SNAKE_CASE_ , )
return encoder_config, decoder_config
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if "encoder.model" in name:
UpperCAmelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
UpperCAmelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
UpperCAmelCase = '''encoder.''' + name
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
UpperCAmelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
UpperCAmelCase = '''encoder.layernorm.bias'''
return name
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[3] )
UpperCAmelCase = int(key_split[5] )
UpperCAmelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[dim : dim * 2, :]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCAmelCase = val
return orig_state_dict
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = DonutModel.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval()
# load HuggingFace model
UpperCAmelCase, UpperCAmelCase = get_configs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = DonutSwinModel(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = MBartForCausalLM(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = VisionEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = original_model.state_dict()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify results on scanned document
UpperCAmelCase = load_dataset('''hf-internal-testing/example-documents''' )
UpperCAmelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ , from_slow=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCAmelCase = DonutProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCAmelCase = '''When is the coffee break?'''
UpperCAmelCase = task_prompt.replace('''{user_input}''' , SCREAMING_SNAKE_CASE_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCAmelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCAmelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
UpperCAmelCase = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCAmelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCAmelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
UpperCAmelCase = original_model.decoder.tokenizer(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )[
'''input_ids'''
]
UpperCAmelCase = original_model.encoder.model.patch_embed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = model.encoder.embeddings(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
# verify encoder hidden states
UpperCAmelCase = original_model.encoder(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = model.encoder(SCREAMING_SNAKE_CASE_ ).last_hidden_state
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
# verify decoder hidden states
UpperCAmelCase = original_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits
UpperCAmelCase = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub.',
)
a__ : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
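# Typical invocation of the conversion script above (the output path is a
# placeholder; in transformers the file is named convert_donut_to_pytorch.py):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa-converted \
#       --push_to_hub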
"""Project Euler problem 234: semidivisible numbers."""
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes over the odd numbers; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999966663333) -> int:
    """Sum all semidivisible n <= limit, i.e. numbers divisible by exactly one of lps(n) and ups(n)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
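# Reference value from the Project Euler 234 statement: the semidivisible
# numbers up to 1000 sum to 34825.
assert solution(1000) == 34825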
"""simple docstring"""
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A = os.path.join(git_repo_path, '''src''', '''transformers''')
A = '''
{0} = None
'''
A = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
A = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[Any] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(_UpperCAmelCase )
__a : Optional[int] = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(_UpperCAmelCase , '''tokenizers''' )
__a : List[Any] = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(_UpperCAmelCase , '''tensorflow_text''' )
__a : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers''' )
__a : str = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tensorflow_text''' )
__a : Union[str, Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers_and_vision''' )
def _lowerCamelCase ( self ):
__a : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _UpperCAmelCase )
self.assertIn('''tensorflow_text''' , _UpperCAmelCase )
self.assertIn('''sentencepiece_and_tokenizers''' , _UpperCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def _lowerCamelCase ( self ):
__a : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , '''\nCONSTANT = None\n''' )
__a : str = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__a : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a : List[str] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , _UpperCAmelCase )
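# The pattern the generated dummies implement, in miniature: the metaclass turns
# class-level attribute access into an informative ImportError, and the generated
# __init__ raises on instantiation (a synthetic sketch, not the real
# transformers.utils.DummyObject / requires_backends):
def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the {backends} backend(s)")


class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


try:
    FakeClass()
    raise AssertionError("expected ImportError")
except ImportError as err:
    assert "torch" in str(err)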
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
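# Typical invocation (all paths and the script file name are placeholders; the
# flags match the argparse definitions above):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted-model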
from sklearn.metrics import recall_score
import datasets
_snake_case : Dict = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_snake_case : Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_snake_case : str = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : Optional[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]="binary" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str="warn" , ) -> int:
__lowerCAmelCase = recall_score(
lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_ , pos_label=lowerCAmelCase_ , average=lowerCAmelCase_ , sample_weight=lowerCAmelCase_ , zero_division=lowerCAmelCase_ , )
return {"recall": float(lowerCAmelCase_ ) if score.size == 1 else score}
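# Direct scikit-learn equivalent of Example 1 in the docstring above:
# positives at indices 2, 3, 4 and two of them recovered -> recall = 2/3.
assert abs(recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]) - 2 / 3) < 1e-9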
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
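# Quick sanity check of the derived attribute (runnable with the public API,
# `from transformers import Swinv2Config`): 96 * 2 ** (4 - 1) = 768.
#
#     config = Swinv2Config()
#     assert config.hidden_size == 768
#     assert config.num_layers == 4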
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
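# Illustrative round trip (GPTNeoXJapaneseTokenizer is the upstream name of the
# class above; the model id matches the pretrained map at the top of this file):
#   tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tok("こんにちは")["input_ids"]
#   tok.decode(ids)  # round-trips back to "こんにちは"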
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
        a_ = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        a_ = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        a_ = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        a_ = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟ"
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโ"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
        a_ = text.replace("\u3000" , "<SP>")  # full-width space
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
        a_ = text.replace("\u2014" , "ー")  # em dash
        a_ = text.replace("\u2212" , "ー")  # minus sign
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
            if len(x) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
            if len(x) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
                a_ , a_ , a_ = sorted(a , key=lambda x: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("โ")
elif word == "<KIGOU>":
words.append("ว")
elif word == "<U2000U2BFF>":
words.append("โ")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
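# Sketch of the byte fallback used in tokenize above (the example character is
# ours): an out-of-vocabulary character such as "𩸽" encodes to the four bytes
# b"\xf0\xa9\xb8\xbd", so it is emitted as four "<|byte%d|>" tokens, which the
# decoding path later reassembles with bytearray(...).decode("utf-8").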
| 685 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( a_ , a_ , **a_ ) -> int:
"""simple docstring"""
__A = AutoConfig.from_pretrained(a_ , **a_ )
__A = AutoModelForSeqaSeqLM.from_config(a_ )
model.save_pretrained(a_ )
AutoTokenizer.from_pretrained(a_ ).save_pretrained(a_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
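# Example invocation via python-fire (the script filename, model id and output
# path are illustrative):
#   python save_randomly_initialized.py facebook/bart-base ./bart-random --d_model=128
# Any extra flags are forwarded to AutoConfig.from_pretrained as config overrides.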
| 55 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
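# To exercise the slow integration check above (the test file path is illustrative,
# RUN_SLOW is the standard transformers switch):
#   RUN_SLOW=1 pytest tests/models/flaubert/test_modeling_flaubert.py -k flaubert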
| 685 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_a : str = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def _a (lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Dict ) -> int:
"""simple docstring"""
for attribute in key.split('.' ):
__snake_case = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
__snake_case = getattr(lowercase__ , lowercase__ ).shape
else:
__snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
else:
__snake_case = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _a (lowercase__ : Any , lowercase__ : List[Any] ) -> int:
"""simple docstring"""
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , )
__snake_case = True
else:
for key, mapped_key in MAPPING.items():
__snake_case = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(lowercase__ )[0].split('.' )[-2]
__snake_case = mapped_key.replace('*' , lowercase__ )
if "weight_g" in name:
__snake_case = 'weight_g'
elif "weight_v" in name:
__snake_case = 'weight_v'
elif "bias" in name:
__snake_case = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = 'weight'
else:
__snake_case = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def _a (lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = full_name.split('conv_layers.' )[-1]
__snake_case = name.split('.' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__snake_case = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__snake_case = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__snake_case = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__snake_case = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple=None , lowercase__ : Any=None , lowercase__ : List[str]=True ) -> Dict:
"""simple docstring"""
if config_path is not None:
__snake_case = UniSpeechSatConfig.from_pretrained(lowercase__ )
else:
__snake_case = UniSpeechSatConfig()
__snake_case = ''
if is_finetuned:
__snake_case = UniSpeechSatForCTC(lowercase__ )
else:
__snake_case = UniSpeechSatForPreTraining(lowercase__ )
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__snake_case = model[0].eval()
recursively_load_weights(lowercase__ , lowercase__ )
hf_wavavec.save_pretrained(lowercase__ )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
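    # Example invocation (all paths are illustrative):
    #   python convert_unispeech_sat.py --checkpoint_path ./unispeech_sat.pt \
    #       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./unispeech-sat-hf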
| 56 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
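    # Standalone sketch of the 6k +/- 1 trial division used above, kept independent
    # of the helpers in this file (the 10001st prime, 104743, is well known):
    def _is_prime_sketch(n: int) -> bool:
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return n in (2, 3)
        i = 5
        while i * i <= n:
            if n % i == 0 or n % (i + 2) == 0:
                return False
            i += 6
        return True

    assert _is_prime_sketch(104743)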
| 685 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : int =JukeboxTokenizer
a : Optional[Any] ={
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _a ( self ):
import torch
UpperCamelCase_: List[str] = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
UpperCamelCase_: Optional[Any] = tokenizer(**self.metas )['input_ids']
# fmt: off
UpperCamelCase_: Union[str, Any] = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _a ( self ):
import torch
UpperCamelCase_: Tuple = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
UpperCamelCase_: Any = tokenizer(**self.metas )['input_ids']
# fmt: off
UpperCamelCase_: Any = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 57 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
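# Illustrative instantiation (VisualBertConfig is the upstream name of the class
# above; the defaults follow its signature, e.g. hidden_size=768 and
# visual_embedding_dim=512):
#   config = VisualBertConfig(visual_embedding_dim=1024)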
| 685 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowerCAmelCase : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case_ : Optional[Any] = k.replace(__UpperCamelCase , __UpperCamelCase )
return k
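# Worked example of the renaming above (the key is chosen for illustration):
# "encoder/LayerNorm/gamma" becomes "encoder.LayerNorm.gamma" via the '/' rule,
# then "encoder_layer_norm.weight" via the '.LayerNorm.gamma' rule, and finally
# "encoder.layer_norm.weight" via the 'encoder_layer_norm.' rule.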
def __lowerCAmelCase ( __UpperCamelCase : dict , __UpperCamelCase : dict ):
'''simple docstring'''
snake_case_ : Any = DEFAULTS.copy()
cfg_kwargs.update(__UpperCamelCase )
snake_case_ : Union[str, Any] = PegasusConfig(**__UpperCamelCase )
snake_case_ : Tuple = PegasusForConditionalGeneration(__UpperCamelCase )
snake_case_ : Optional[int] = torch_model.model.state_dict()
snake_case_ : Dict = {}
for k, v in tf_weights.items():
snake_case_ : str = rename_state_dict_key(__UpperCamelCase )
if new_k not in sd:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
snake_case_ : Optional[Any] = v.T
snake_case_ : List[str] = torch.tensor(__UpperCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
snake_case_ : str = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case_ : Tuple = mapping["""shared.weight"""]
snake_case_ : Optional[Any] = mapping["""shared.weight"""]
snake_case_ : List[str] = {k: torch.zeros_like(__UpperCamelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = torch_model.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
snake_case_ : str = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def __lowerCAmelCase ( __UpperCamelCase : List[Any]="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
snake_case_ : Dict = tf.train.list_variables(__UpperCamelCase )
snake_case_ : Any = {}
snake_case_ : List[Any] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__UpperCamelCase , desc="""converting tf checkpoint to dict""" ):
snake_case_ : Dict = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case_ : Union[str, Any] = tf.train.load_variable(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = array
return tf_weights
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Union[str, Any] = Path(__UpperCamelCase ).parent.name
snake_case_ : str = task_specific_params[F'summarization_{dataset}']["""max_position_embeddings"""]
snake_case_ : int = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__UpperCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__UpperCamelCase )
# convert model
snake_case_ : Tuple = get_tf_weights_as_numpy(__UpperCamelCase )
snake_case_ : Dict = task_specific_params[F'summarization_{dataset}']
if dataset == "large":
snake_case_ : int = task_specific_params
snake_case_ : Optional[Any] = convert_pegasus(__UpperCamelCase , __UpperCamelCase )
torch_model.save_pretrained(__UpperCamelCase )
snake_case_ : str = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__UpperCamelCase , Path(__UpperCamelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowerCAmelCase : Dict = parser.parse_args()
if args.save_dir is None:
__lowerCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
__lowerCAmelCase : Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
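    # Example invocation (the script filename is illustrative; the checkpoint path
    # mirrors the default in get_tf_weights_as_numpy above):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000
    # With save_dir omitted, the converted model lands in pegasus/aeslc.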
| 58 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
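    # Illustrative expectation (the grid is ours): cells equal to 1 are traversable
    # and every step costs 1, so on a 2x2 grid of ones the best route from (0, 0)
    # to (1, 1) has distance 2 with path [(0, 0), (0, 1), (1, 1)].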
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
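# Illustrative effect of the _LazyModule above: importing this package is cheap,
# and the torch-backed classes are only materialized on first attribute access,
# e.g.
#   from transformers.models.sew import SEWConfig  # lightweight
#   from transformers.models.sew import SEWModel   # triggers the real modeling import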
| 59 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
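    # Example invocation (all paths are illustrative):
    #   python convert_mluke_checkpoint.py --checkpoint_path ./luke.bin \
    #       --metadata_path ./metadata.json --entity_vocab_path ./entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base --model_size base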
| 685 | 0 |
def lowerCamelCase_ ( _UpperCamelCase ) -> float:
"""simple docstring"""
snake_case_ : List[Any] = 0
while len(_UpperCamelCase ) > 1:
snake_case_ : List[Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
snake_case_ : str = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
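    # Standalone heap-based sketch of the same greedy idea (the example sizes are
    # ours): merging [20, 4, 6] costs 4 + 6 = 10 first, then 10 + 20 = 30, so 40.
    import heapq

    def _optimal_merge_sketch(sizes):
        heap = list(sizes)
        heapq.heapify(heap)
        total = 0
        while len(heap) > 1:
            merged = heapq.heappop(heap) + heapq.heappop(heap)
            total += merged
            heapq.heappush(heap, merged)
        return total

    assert _optimal_merge_sketch([20, 4, 6]) == 40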
| 60 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 685 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
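# Usage sketch (import path assumed from this file's place in the transformers
# tree): nothing heavy is imported until first attribute access, so the line
# below only materializes the config module, not the torch-backed model.
#
#   from transformers.models.timm_backbone import TimmBackboneConfig
#   config = TimmBackboneConfig(backbone="resnet50")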
| 61 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    # Minimal stub so the annotations below still resolve when Pillow is absent
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    """Returns the MD5 hex digest of an image's raw pixel bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
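# Usage sketch (assumes Pillow is available): identical pixel data always maps
# to the same digest, which is why the slow tests below can compare depth maps by hash.
#
#   img = Image.new("RGB", (4, 4))
#   print(hashimage(img))  # deterministic MD5 hex digest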
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
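# Standalone usage sketch for the pipeline exercised above (model id taken from
# the slow test; output keys as asserted in run_pipeline_test):
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"]            # PIL.Image.Image visualization of the depth map
#   out["predicted_depth"]  # raw torch.Tensor of per-pixel depth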
| 62 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
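    # A few illustrative checks against the pattern above:
    #   is_sri_lankan_phone_number("+94773283048")   # True
    #   is_sri_lankan_phone_number("0718382399")     # True
    #   is_sri_lankan_phone_number("0094912343221")  # False: only 07x mobile prefixes match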
| 685 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self : Dict , __lowercase : Any , __lowercase : List[str]=2 , __lowercase : List[str]=True , __lowercase : Dict=False , __lowercase : Optional[int]=10 , __lowercase : str=3 , __lowercase : Any=32 * 4 , __lowercase : Optional[Any]=32 * 6 , __lowercase : List[str]=4 , __lowercase : Optional[int]=32 , ) -> str:
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : str = is_training
__UpperCAmelCase : List[Any] = use_auxiliary_loss
__UpperCAmelCase : Any = num_queries
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Union[str, Any] = min_size
__UpperCAmelCase : Tuple = max_size
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : List[str] = mask_feature_size
def UpperCAmelCase ( self : Tuple ) -> List[str]:
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
__UpperCAmelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
__UpperCAmelCase : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
__UpperCAmelCase : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase ( self : str ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCAmelCase ( self : List[str] , __lowercase : Dict , __lowercase : int ) -> List[str]:
__UpperCAmelCase : Dict = output.encoder_hidden_states
__UpperCAmelCase : Union[str, Any] = output.pixel_decoder_hidden_states
__UpperCAmelCase : List[str] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : int=False ) -> Tuple:
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
__UpperCAmelCase : str = model(__lowercase , output_hidden_states=__lowercase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCAmelCase : str = model(pixel_values=__lowercase , pixel_mask=__lowercase )
__UpperCAmelCase : Union[str, Any] = model(__lowercase )
comm_check_on_output(__lowercase )
__UpperCAmelCase : int = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase ( self : Optional[int] ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
pass
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
__UpperCAmelCase : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : int = (self.model_tester.min_size,) * 2
__UpperCAmelCase : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__lowercase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__lowercase ),
"""class_labels""": torch.zeros(2 , 10 , device=__lowercase ).long(),
}
__UpperCAmelCase : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
__UpperCAmelCase : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def UpperCAmelCase ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(__lowercase ).to(__lowercase )
__UpperCAmelCase : Optional[int] = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase ( self : int ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__UpperCAmelCase : Optional[int] = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Union[str, Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
# only MaskFormerForInstanceSegmentation has the loss
__UpperCAmelCase : List[Any] = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : List[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : Optional[int] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
__UpperCAmelCase : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCAmelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
__UpperCAmelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCAmelCase : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : Dict = 1e-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase ( self : str ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__lowercase )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : Dict = model(**__lowercase )
        __UpperCAmelCase : Union[str, Any] = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        __UpperCAmelCase : Dict = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        __UpperCAmelCase : Any = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : Any = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowercase )
# masks_queries_logits
__UpperCAmelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __UpperCAmelCase : str = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
__UpperCAmelCase : Dict = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
__UpperCAmelCase : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __UpperCAmelCase : Optional[Any] = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : Any ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : Any = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Any = image_processor(__lowercase , return_tensors="""pt""" ).to(__lowercase )
__UpperCAmelCase : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 800, 1088) )
with torch.no_grad():
__UpperCAmelCase : int = model(**__lowercase )
# masks_queries_logits
__UpperCAmelCase : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __UpperCAmelCase : Tuple = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__UpperCAmelCase : Dict = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
__UpperCAmelCase : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __UpperCAmelCase : Union[str, Any] = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
__UpperCAmelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowercase )
.eval()
)
__UpperCAmelCase : List[str] = self.default_image_processor
        __UpperCAmelCase : List[str] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
__UpperCAmelCase : int = inputs["""pixel_values"""].to(__lowercase )
__UpperCAmelCase : str = [el.to(__lowercase ) for el in inputs["""mask_labels"""]]
__UpperCAmelCase : Optional[Any] = [el.to(__lowercase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
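# Post-processing sketch for the segmentation outputs above (method name taken
# from MaskFormerImageProcessor; treat the exact signature as an assumption):
#
#   seg_map = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(384, 384)]
#   )[0]  # (H, W) tensor of per-pixel class ids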
| 63 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            " this." )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
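# Illustration of what the two regexes capture on a toy mapping line:
#
#   _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(")            # matches
#   _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0]  # -> "albert"
#
# so each OrderedDict block is re-emitted sorted alphabetically by that quoted
# identifier, which is what `make style` enforces through this script.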
| 685 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info('''initializing retrieval''')

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
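# Rank-level flow of retrieve() above, for a world of N processes:
#   ranks 1..N-1: dist.gather(question_hidden_states) to rank 0, then block on the scatter
#   rank 0:       runs _main_retrieve on the concatenated batch and chunks the results
#   all ranks:    receive their slice of doc ids / doc embeddings through _scattered()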
| 64 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # timm models are initialized by the timm library itself
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
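# Forward-pass sketch (backbone name and input shape are hypothetical):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   model = TimmBackbone(config)
#   feature_maps = model(torch.randn(1, 3, 224, 224)).feature_maps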
| 685 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 65 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
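# Complexity note: ElementPriorityQueue.dequeue() scans the list with min() and
# remove(), so each dequeue is O(n). A binary heap keeps both enqueue and
# dequeue at O(log n); minimal sketch:
#
#   import heapq
#   heap = []
#   heapq.heappush(heap, 70)
#   heapq.heappush(heap, 10)
#   heapq.heappop(heap)  # -> 10, the smallest element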
| 685 | 0 |
import os
import pytest
from attr import dataclass
DEFAULT_REGION = "us-east-1"  # default region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
return F"""{self.framework}-transfromers-test"""
@property
    def test_path(self):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 66 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 0 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) from the Friedmann equation, given the density
    parameters and a redshift."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("""All input parameters must be positive""")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("""Relative densities cannot be greater than one""")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
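    # Sanity check: with the densities above, E(0) = sqrt(1e-4 + 0.3 - 1e-4 + 0.7) = 1,
    # so the printed value equals the Hubble constant itself, 68.3.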
| 68 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doc(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doc(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doc(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_main_doc_classes(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # If activation instances were shared, act2 would also expose the attribute set on act1.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
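# Note on test_gelu_versions above: gelu_python is the exact erf-based GELU,
# while gelu_new is the tanh approximation, so the two agree only approximately
# (at x = 3.0 they differ by roughly 4e-4, enough to fail torch.allclose at its
# default tolerances).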
| 69 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
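    # Worked check for n = 10: (1 + ... + 10)^2 = 55^2 = 3025 and
    # 1^2 + ... + 10^2 = 385, so the difference is 3025 - 385 = 2640.
    print(solution(10))  # -> 2640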
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]), )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : int = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = tmp_path / "cache"
UpperCAmelCase_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = features.copy() if features else default_expected_features
UpperCAmelCase_ : Optional[Any] = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Optional[Any] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
"""simple docstring"""
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = parquet_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = [parquet_path]
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any]=("train",) ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for split in splits:
UpperCAmelCase_ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = features.copy() if features else default_expected_features
UpperCAmelCase_ : str = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if split:
UpperCAmelCase_ : Tuple = {split: parquet_path}
else:
UpperCAmelCase_ : int = "train"
UpperCAmelCase_ : int = {"train": parquet_path, "test": parquet_path}
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = ParquetDatasetWriter(_SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCAmelCase_ : Optional[int] = pq.ParquetFile(tmp_path / "foo.parquet" )
UpperCAmelCase_ : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = str(shared_datadir / "test_image_rgb.jpg" )
UpperCAmelCase_ : Optional[int] = {"image": [image_path]}
UpperCAmelCase_ : List[str] = Features({"image": Image()} )
UpperCAmelCase_ : Any = Dataset.from_dict(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = ParquetDatasetWriter(_SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCAmelCase_ : Tuple = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
UpperCAmelCase_ : str = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
assert get_writer_batch_size(_SCREAMING_SNAKE_CASE ) == expected
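# Reading of the parametrization above (a summary, not new behaviour): plain value-typed
# features yield None, i.e. the writer's default batch size, while Image and Audio features
# select the smaller row-group sizes defined in `datasets.config`.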
| 71 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
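# Note on the data-parallel setup above (a summary, not new behaviour): the pipeline params
# are replicated onto every device while the prompt ids, image inputs and PRNG keys are
# sharded, which is the standard layout for a pmapped (jit=True) Flax pipeline call.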
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , snake_case_=0.6 , snake_case_=None , ):
lowercase =parent
lowercase =batch_size
lowercase =image_size
lowercase =patch_size
lowercase =num_channels
lowercase =is_training
lowercase =use_labels
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =intermediate_size
lowercase =hidden_act
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =type_sequence_label_size
lowercase =initializer_range
lowercase =mask_ratio
lowercase =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase =(image_size // patch_size) ** 2
lowercase =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
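# Worked example of the formula above using this tester's defaults (illustrative only):
# num_patches = (30 // 2) ** 2 = 225, so with mask_ratio = 0.6
# seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91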
def _A( self ):
lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase =None
if self.use_labels:
lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase =self.get_config()
return config, pixel_values, labels
def _A( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _A( self , snake_case_ , snake_case_ , snake_case_ ):
lowercase =ViTMAEModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ ):
lowercase =ViTMAEForPreTraining(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ )
lowercase =(self.image_size // self.patch_size) ** 2
lowercase =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
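# Illustrative shape arithmetic for the assertion above (not asserted by the source):
# num_patches = (30 // 2) ** 2 = 225 and expected_num_channels = 2 ** 2 * 3 = 12,
# so the reconstruction logits come out as (batch_size, 225, 12).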
# test greyscale images
lowercase =1
lowercase =ViTMAEForPreTraining(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase =model(snake_case_ )
lowercase =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _A( self ):
lowercase =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase =config_and_inputs
lowercase ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCamelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self ):
lowercase =ViTMAEModelTester(self )
lowercase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def _A( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _A( self ):
pass
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase =model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase =model_class(snake_case_ )
lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase =[*signature.parameters.keys()]
lowercase =['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ ):
# make masks reproducible
np.random.seed(2 )
lowercase =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase =torch.from_numpy(snake_case_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase =pt_noise
super().check_pt_tf_models(snake_case_ , snake_case_ , snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase =model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
lowercase =outputs[0].cpu().numpy()
lowercase =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
lowercase =model_class.from_pretrained(snake_case_ )
model.to(snake_case_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
# Make sure we don't have nans
lowercase =after_outputs[0].cpu().numpy()
lowercase =0
lowercase =np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(snake_case_ , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _A( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _A( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _A( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _A( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A( self ):
pass
@slow
def _A( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase =ViTMAEModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowercase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _A( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _A( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase =ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(snake_case_ )
lowercase =self.default_image_processor
lowercase =prepare_img()
lowercase =image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase =ViTMAEConfig()
lowercase =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase =model(**snake_case_ , noise=torch.from_numpy(snake_case_ ).to(device=snake_case_ ) )
# verify the logits
lowercase =torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , snake_case_ )
lowercase =torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case_ ) , atol=1E-4 ) )
| 72 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
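# Minimal hand-checked example (assuming the classic Project Euler 1 behaviour):
# solution(10) == 3 + 5 + 6 + 9 == 23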
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Union[str, Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
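# Hand-checked example for the sieve above (not part of the source):
# prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]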
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 0 |
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]
# initialize interval's left pointer and right pointer
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0
for i in range(1 , len(snake_case ) ):
# case when current index is inside the interval
if i <= right_pointer:
__SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
__SCREAMING_SNAKE_CASE : Dict = min_edge
while go_next(snake_case , snake_case , snake_case ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1
return z_result
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index marks the starting position of a substring
# equal to the pattern string
if val >= len(snake_case ):
answer += 1
return answer
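# Hand-checked examples for the helpers above (not part of the source tests):
# z_function("abacaba") -> [0, 0, 1, 0, 3, 0, 1]
# find_pattern("abr", "abracadabra") -> 2 (matches at offsets 0 and 7)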
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 0 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( enum.Enum ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'generated'
def __init__( self : List[str] , *_A : Optional[Any] , **_A : List[str] ):
'''simple docstring'''
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowercase_ ( self : Any , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Any=None , _A : Union[str, Any]=None , _A : str=None , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {}
if truncation is not None:
UpperCAmelCase__ : Union[str, Any] = truncation
UpperCAmelCase__ : Any = generate_kwargs
UpperCAmelCase__ : Optional[Any] = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase__ : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase__ : Tuple = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase__ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase__ : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase_ ( self : List[str] , _A : int , _A : int , _A : int ):
'''simple docstring'''
return True
def lowercase_ ( self : List[str] , *_A : List[Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
UpperCAmelCase__ : Tuple = ([prefix + arg for arg in args[0]],)
UpperCAmelCase__ : Dict = True
elif isinstance(args[0] , _A ):
UpperCAmelCase__ : List[str] = (prefix + args[0],)
UpperCAmelCase__ : Dict = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
UpperCAmelCase__ : List[str] = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : int , *_A : int , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def lowercase_ ( self : Tuple , _A : str , **_A : Any ):
'''simple docstring'''
if self.framework == "pt":
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
UpperCAmelCase__ : str = generate_kwargs.get('''min_length''' , self.model.config.min_length )
UpperCAmelCase__ : Optional[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
UpperCAmelCase__ : int = self.model.generate(**_A , **_A )
UpperCAmelCase__ : List[Any] = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase__ : str = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase__ : Union[str, Any] = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowercase_ ( self : Union[str, Any] , _A : Any , _A : Any=ReturnType.TEXT , _A : Optional[Any]=False ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ : Any = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase__ : List[str] = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'summary'
def __call__( self : Tuple , *_A : Optional[int] , **_A : Optional[int] ):
'''simple docstring'''
return super().__call__(*_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : int , _A : int , _A : int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(__a )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'translation'
def lowercase_ ( self : Tuple , _A : int , _A : int , _A : int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def lowercase_ ( self : List[Any] , *_A : Any , _A : Dict=TruncationStrategy.DO_NOT_TRUNCATE , _A : str=None , _A : Any=None ):
'''simple docstring'''
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def lowercase_ ( self : Union[str, Any] , _A : Optional[Any]=None , _A : Optional[int]=None , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
UpperCAmelCase__ : int = src_lang
if tgt_lang is not None:
UpperCAmelCase__ : Union[str, Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCAmelCase__ : List[Any] = kwargs.get('''task''' , self.task )
UpperCAmelCase__ : int = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
UpperCAmelCase__ : Any = items[1]
UpperCAmelCase__ : Optional[int] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Union[str, Any] , *_A : int , **_A : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*_A , **_A )
| 75 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_1d import UNet1DModel
from .unet_2d import UNet2DModel
from .unet_2d_condition import UNet2DConditionModel
from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__lowercase : Optional[int] = 0
while number:
# number &= number - 1 clears the lowest set bit, so instead of looping
# through all 32 bit positions and checking for 1s, the loop body runs
# exactly once per set bit in the input
number &= number - 1
count += 1
return count
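# Trace of the bit-clearing trick for one example input (chosen here, not from the source):
# number = 13 (0b1101) -> 12 (0b1100) -> 8 (0b1000) -> 0, so count == 3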
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
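# Sketch of the vocab.txt layout the parser above assumes (inferred from the code, not documented):
# one subword per line, where a line may hold several comma-separated spellings sharing one id,
# e.g. a line "こんにちは,コンニチハ" maps both spellings to the same token id.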
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-ๅนด])*(0?[1-9]|1[0-2])[/\-ๆ]((0?[1-9]|[12][0-9]|3[01])ๆฅ?)*(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"(ๆๆฒป|ๅคงๆญฃ|ๆญๅ|ๅนณๆ|ไปคๅ|ใพ|ใฝ|ใผ|ใป|\u32ff)\d{1,2}ๅนด(0?[1-9]|1[0-2])ๆ(0?[1-9]|[12][0-9]|3[01])ๆฅ(\d{1,2}|:|\d{1,2}ๆ|\d{1,2}ๅ|\(ๆฅ\)|\(ๆ\)|\(็ซ\)|\(ๆฐด\)|\(ๆจ\)|\(้\)|\(ๅ\)|ใฐ|ใช|ใซ|ใฌ|ใญ|ใฎ|ใฏ)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ไธ)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*ๅ)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(ๅๅ|ไธๅ|ๅไธๅ|ๅ|ๅใใซ|ไธใใซ|ๅไธใใซ|ใใซ|ๅใฆใผใญ|ไธใฆใผใญ|ๅไธใฆใผใญ|ใฆใผใญ)+(\(็จ่พผ\)|\(็จๆ\)|\+tax)*")
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโปโผโฝโพโฟ"
a_ = "โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโ"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace("ใ" , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("โ" , "ใผ")
a_ = text.replace("โ" , "ใผ")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(x) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(x) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("โ")
elif word == "<KIGOU>":
words.append("ว")
elif word == "<U2000U2BFF>":
words.append("โ")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
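        # The beam-search QA head keeps start_n_top start candidates and, for each of
        # them, end_n_top end candidates, hence the start_n_top * end_n_top end scores
        # checked above.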
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
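        # Multiple-choice models take (batch, num_choices, seq_len) inputs, built above
        # by repeating each example once per choice, and emit one logit per choice.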
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 685 | 0 |
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """
    Project Euler 23: return the sum of all positive integers that cannot be
    written as the sum of two abundant numbers.
    """
    # sum_divs[n] accumulates the sum of the proper divisors of n (divisor sieve).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
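# Every integer greater than 28123 can be written as the sum of two abundant
# numbers (the bound stated in Project Euler 23), so the default limit covers
# all candidates.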
if __name__ == "__main__":
print(solution())
| 78 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k ± 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
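# A few sanity checks for the trial division above:
# is_prime(2) -> True, is_prime(9) -> False, is_prime(97) -> True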
def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
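# Example: solution(6) == 13, since the first six primes are 2, 3, 5, 7, 11 and 13.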
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Build the id-to-name metadata dict OneFormer expects from a class-info JSON on the Hub."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
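# Example usage (assumes network access to the Hugging Face Hub):
# metadata = prepare_metadata("ade20k_panoptic.json", "shi-labs/oneformer_demo")
# metadata["class_names"] lists every label; metadata["thing_ids"] the "thing" class ids.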
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
        size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255,
        repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
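        # The random logits above only need to mimic OneFormer's output shapes:
        # (batch, queries, classes + 1) for class scores (the extra slot is the null
        # class) and (batch, queries, height, width) for the per-query masks.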
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        # (mask layout chosen to match the RLE assertions below)
        fake_binary_mask[0, 20:] = 1  # 30 ones at the end of the first row...
        fake_binary_mask[1, :15] = 1  # ...joined by 15 more: one run of 45 ones
        fake_binary_mask[5, :10] = 1  # a second, separate run of 10 ones

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
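        # binary_mask_to_rle flattens the mask and returns alternating
        # (1-indexed start, run length) pairs for the runs of ones, so the first
        # run here starts at pixel 21 and spans 45 pixels.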
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
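    # Unlike semantic post-processing, which returns only a per-pixel class map,
    # the instance and panoptic variants also return segments_info with one entry
    # per detected segment.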
| 79 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
        special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
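# Incoming visual features of size visual_embedding_dim are combined with the text
# embeddings; bypass_transformer and special_visual_initialize are VisualBERT-specific
# switches controlling, respectively, a separate shallow path for the visual embeddings
# and whether visual token type/position embeddings start from their text counterparts.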
| 685 | 0 |