| column | type | range |
|---|---|---|
| code | string | 82 - 54.1k chars |
| code_codestyle | int64 | 0 - 699 |
| style_context | string | 111 - 35.6k chars |
| style_context_codestyle | int64 | 0 - 699 |
| label | int64 | 0 - 1 |
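Each row pairs a `code` sample with a `style_context` sample; the two `*_codestyle` columns hold integer style-class IDs, and `label` is a binary flag, presumably marking whether the two samples share the same coding style. All rows shown below carry `code_codestyle` 43.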
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the number 28433 * 2**7830457 + 1
    (Project Euler problem 97).

    >>> solution()
    '8739992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # pow() with a third argument performs modular exponentiation, so the
    # multi-million-digit power is never materialized.
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
# ---- row boundary (code_codestyle: 43) ----
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    """Return the path passed via ``-f`` (used when the suite is launched through pytest)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """Load the ``all_results.json`` file an example script writes to its output dir."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write a default accelerate config file so `accelerate launch` runs non-interactively.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            """.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
            """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
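# A possible invocation of one of these tests (file name assumed for
# illustration; run from the repository root with pytest):
#
#   python -m pytest -k test_run_glue_no_trainer examples/pytorch/test_accelerate_examples.py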
# ---- row boundary (code_codestyle: 43, label: 1) ----
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(node: TreeNode | None) -> bool:
    """Check whether the tree rooted at ``node`` is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        # Every node must be a TreeNode whose data can be interpreted as a float.
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # Each node's value must lie strictly between the bounds inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
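# A minimal usage sketch (not part of the original module): the first tree is a
# valid BST, the second violates the ordering invariant.
valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
assert is_binary_search_tree(valid)
assert not is_binary_search_tree(invalid)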
# ---- row boundary (code_codestyle: 43) ----
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    # mT5 reuses the TF T5 model implementation; only the config differs.
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
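# Usage sketch (any mT5 checkpoint from the Hub works the same way; shown here
# with google/mt5-small):
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")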
# ---- row boundary (code_codestyle: 43, label: 1) ----
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Tuple[int, int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    """Compute a target (height, width) whose sides are multiples of ``multiple``."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
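# A quick illustration (a sketch, not part of the original file): resizing a
# 480x640 image toward 384x384 with keep_aspect_ratio=True and multiple=32
# fits the height (scale 0.8) and scales the width accordingly, so the
# helper returns (384, 512):
#
#   example = np.zeros((480, 640, 3), dtype=np.uint8)
#   get_resize_output_image_size(example, output_size=384, keep_aspect_ratio=True, multiple=32)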
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize ``image`` to ``size``, optionally preserving aspect ratio and snapping to a multiple."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize ``image`` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Apply the full resize/rescale/normalize pipeline and return a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
# ---- row boundary (code_codestyle: 43) ----
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradient updates for all parameters of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display ``image`` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
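# A minimal usage sketch (assuming the reconstructed names above):
layer = torch.nn.Linear(4, 4)
freeze_module(layer)
assert all(not p.requires_grad for p in layer.parameters())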
# ---- row boundary (code_codestyle: 43, label: 1) ----
# Notebook bootstrap cell for the Korean docs. The Korean comments read "How to
# install Transformers" and "To install from source instead of the last
# release, comment the command above and uncomment the one below."
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# ---- row boundary (code_codestyle: 43) ----
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)

            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
# ---- row boundary (code_codestyle: 43, label: 1) ----
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (header_html,)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
# ---- row boundary (code_codestyle: 43) ----
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase base16 (hex) string.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
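# A small round-trip sketch (not part of the original module): arbitrary bytes
# survive an encode/decode cycle unchanged.
payload = b"\x00\xffbase16"
assert base16_decode(base16_encode(payload)) == payload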
# ---- row boundary (code_codestyle: 43, label: 1) ----
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def lowerCamelCase_ ( self: Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ = None
lowercase__ = 10
lowercase__ = 2
# create ramp distribution
lowercase__ = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
lowercase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase__ = FlaxTopKLogitsWarper(3 )
lowercase__ = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase__ = 5
lowercase__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase__ = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, length) ).copy()
lowercase__ = top_k_warp_safety_check(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCamelCase_ ( self: str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = None
lowercase__ = 10
lowercase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase__ = FlaxTopPLogitsWarper(0.8 )
lowercase__ = np.exp(top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowercase__ = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase__ = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
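    # Note: top-p (nucleus) filtering keeps the smallest set of highest-probability
    # tokens whose cumulative probability reaches `top_p` and masks out the rest,
    # which is what the expected distributions in this test encode.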
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_ )
# check that min length is applied at length 5
lowercase__ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowercase__ = 5
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = 15
lowercase__ = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase_ ( self: List[Any] ) -> int:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
# check that all scores are -inf except the bos_token_id score
lowercase__ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowercase__ = 1
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase__ = 3
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = 5
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase__ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowercase__ = 4
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase__ = 3
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
lowercase__ = 4
lowercase__ = 10
lowercase__ = 15
lowercase__ = 2
lowercase__ = 1
lowercase__ = 15
# dummy input_ids and scores
lowercase__ = ids_tensor((batch_size, sequence_length) , UpperCamelCase_ )
lowercase__ = input_ids.copy()
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = scores.copy()
# instantiate all dist processors
lowercase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase__ = FlaxTopKLogitsWarper(3 )
lowercase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_ )
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
lowercase__ = 10
# no processor list
lowercase__ = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = bos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# with processor list
lowercase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase__ = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCamelCase_ ( self: Dict ) -> Any:
"""simple docstring"""
lowercase__ = 4
lowercase__ = 10
lowercase__ = 15
lowercase__ = 2
lowercase__ = 1
lowercase__ = 15
# dummy input_ids and scores
lowercase__ = ids_tensor((batch_size, sequence_length) , UpperCamelCase_ )
lowercase__ = input_ids.copy()
lowercase__ = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = scores.copy()
# instantiate all dist processors
lowercase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase__ = FlaxTopKLogitsWarper(3 )
lowercase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_ )
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
lowercase__ = 10
# no processor list
def run_no_processor_list(UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] ):
lowercase__ = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = bos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowercase__ = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
return scores
# with processor list
def run_processor_list(UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
lowercase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase__ = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
return scores
lowercase__ = jax.jit(UpperCamelCase_ )
lowercase__ = jax.jit(UpperCamelCase_ )
lowercase__ = jitted_run_no_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = jitted_run_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 43 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from ``position`` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Check whether every square of the board has been visited (is non-zero)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Extend the tour by backtracking: try each valid move and undo it on failure."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move before trying the next candidate
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
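# Example: a 1x1 board is trivially a complete tour, while no open tour exists
# on a 2x2 board.
# >>> open_knight_tour(1)
# [[1]]
# >>> open_knight_tour(2)
# Traceback (most recent call last):
#     ...
# ValueError: Open Knight Tour cannot be performed on a board of size 2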
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( UpperCamelCase__ ):
_lowercase : str = ['''image_processor''', '''tokenizer''']
_lowercase : Any = '''CLIPImageProcessor'''
_lowercase : str = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self: str , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase_ , )
lowercase__ = kwargs.pop('''feature_extractor''' )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self: int , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Union[str, Any] ) -> Tuple:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if images is not None:
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase_ ) , tensor_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , *UpperCamelCase_: str , **UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
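# Minimal wiring sketch (identifiers and checkpoint below are illustrative
# assumptions, not a specific released model):
#
#   from transformers import CLIPImageProcessor, XLMRobertaTokenizer
#   processor = _a(
#       image_processor=CLIPImageProcessor(),
#       tokenizer=XLMRobertaTokenizer.from_pretrained("xlm-roberta-base"),
#   )
#   enc = processor(text="a photo of a cat", return_tensors="pt")  # tokenized text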
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
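            # Downstream, the guided prediction is recombined as
            # `uncond + guidance_scale * (cond - uncond)`; see the denoising loop below.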
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _a ( UpperCamelCase__ ):
_lowercase : Any = '''vit_msn'''
def __init__( self: Any , UpperCamelCase_: List[Any]=768 , UpperCamelCase_: Any=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Dict=3_072 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: str=1E-0_6 , UpperCamelCase_: str=224 , UpperCamelCase_: List[Any]=16 , UpperCamelCase_: int=3 , UpperCamelCase_: int=True , **UpperCamelCase_: Optional[int] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
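# Illustrative: instantiating the config above with no arguments yields a
# ViT-Base-like layout (hidden_size=768, 12 layers, 12 heads, 16x16 patches on
# 224x224 images); any keyword argument overrides a single field.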
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...    \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.44\n\n    Example 2:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...    \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...    \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...    \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.61\n\n    Example 3:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...    \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...    \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...    \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n    >>> print(round(results["google_bleu"], 2))\n    0.53\n\n    Example 4:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...    \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...    \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...    \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n    >>> print(round(results["google_bleu"], 2))\n    0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
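# Quick sanity check of the underlying nltk scorer used above (values illustrative,
# mirroring the docstring examples):
#
#   from nltk.translate import gleu_score
#   hyp = ["he", "read", "the", "book"]
#   ref = ["he", "read", "a", "book"]
#   gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)
#   # -> a value in [0, 1]: matching n-grams over the larger of the two n-gram totals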
| 43 | 1 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Dict = ['''flax''', '''transformers''']
def __init__( self: Optional[int] , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: int , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : str = ['''flax''', '''transformers''']
def __init__( self: Dict , *UpperCamelCase_: str , **UpperCamelCase_: Dict ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Union[str, Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: int ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Dict = ['''flax''', '''transformers''']
def __init__( self: List[Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , *UpperCamelCase_: Tuple , **UpperCamelCase_: Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: str , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self: List[str] , *UpperCamelCase_: List[str] , **UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: int , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Any , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
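# (Pattern note: these metaclass-backed placeholders let the package import cleanly
# when flax/transformers are not installed; using any of them raises a descriptive
# "requires backends" error instead of a bare ImportError at import time.)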
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
| 43 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _a ( UpperCamelCase__ ):
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = SMALL_MODEL_IDENTIFIER
lowercase__ = '''pt'''
lowercase__ = '''tf'''
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any ) -> int:
"""simple docstring"""
lowercase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ )
model_tf.save_pretrained(UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = '''mock_framework'''
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
with patch('''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ), patch(
'''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
lowercase__ = MagicMock(return_value=UpperCamelCase_ )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ), patch(
'''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ):
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
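# (Resolution order exercised by the tests above: an explicit `framework` argument
# wins; otherwise the serialized format of a local checkpoint decides; otherwise
# the installed backend is used, preferring PyTorch when both torch and TensorFlow
# are available, and raising an error when neither is.)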
| 43 |
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _a ( ):
"""simple docstring"""
from timeit import timeit
lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
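# Example: the three implementations agree on pangrams and non-pangrams alike.
# >>> is_pangram("Waltz, bad nymph, for quick jigs vex.")
# True
# >>> is_pangram_fastest("hello world")
# False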
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def _a ( SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if subparsers is not None:
lowercase__ = subparsers.add_parser('''tpu-config''' , description=_description )
else:
lowercase__ = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase__ = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=SCREAMING_SNAKE_CASE , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=SCREAMING_SNAKE_CASE , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase__ = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=SCREAMING_SNAKE_CASE , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = None
# Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
lowercase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase__ = defaults.command_file
if not args.command and defaults.commands is not None:
lowercase__ = defaults.commands
if not args.tpu_name:
lowercase__ = defaults.tpu_name
if not args.tpu_zone:
lowercase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase__ = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase__ = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
lowercase__ = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
lowercase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase__ = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase__ = '''; '''.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase__ = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('''Successfully setup pod.''' )
def _a ( ):
"""simple docstring"""
lowercase__ = tpu_command_parser()
lowercase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
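# For reference, the assembled invocation built above has the shape (values
# illustrative):
#
#   gcloud [alpha] compute tpus tpu-vm ssh <tpu_name> --zone <tpu_zone> \
#       --command "cd /usr/share; pip install accelerate -U; <your commands>" --worker all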
| 43 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE )
else:
lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE )
    for i, tensor in enumerate(SCREAMING_SNAKE_CASE ):
        # write each (truncated) tensor into out_tensor in place; left padding fills from the end
        if padding_side == "right":
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
return out_tensor.tolist()
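# e.g. padding_tensor([[1, 2], [3]], -1, "right", 3) -> [[1, 2, -1], [3, -1, -1]]  (toy example)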
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ord(SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE )
if cat.startswith('''P''' ):
return True
return False
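# e.g. "!" and "," are punctuation (True); "a" and " " are not (False)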
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : PreTrainedTokenizerBase
_lowercase : Union[bool, str, PaddingStrategy] = True
_lowercase : Optional[int] = None
_lowercase : Optional[int] = None
_lowercase : int = -100
_lowercase : str = "pt"
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
import torch
lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ = self.tokenizer.pad(
UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
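        # entity_ids were padded by the tokenizer above; labels, ner_tags and entity spans are padded
        # to the same length by hand below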
lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase__ = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ = [
list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels
]
else:
lowercase__ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels
]
lowercase__ = [feature['''ner_tags'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = [feature['''original_entity_spans'''] for feature in features]
lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.int64 ) for k, v in batch.items()}
return batch
| 43 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: str=13 , UpperCamelCase_: Optional[Any]=7 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=True , UpperCamelCase_: Union[str, Any]=99 , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: List[str]=5 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Dict=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: Optional[Any]=128 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=16 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Dict=4 , UpperCamelCase_: Tuple=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = NezhaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: int , ) -> List[Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = NezhaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict ) -> str:
"""simple docstring"""
lowercase__ = NezhaForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int ) -> Any:
"""simple docstring"""
lowercase__ = NezhaForNextSentencePrediction(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = NezhaForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = NezhaForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = NezhaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = NezhaForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = NezhaForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
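        # each input is tiled to (batch_size, num_choices, seq_len) so every choice is scored in one forward pass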
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_lowercase : str = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Tuple = True
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int]=False ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = NezhaModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase__ = None
self.model_tester.create_and_check_model_as_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = NezhaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = torch.jit.trace(
UpperCamelCase_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , '''bert.pt''' ) )
lowercase__ = torch.jit.load(os.path.join(UpperCamelCase_ , '''bert.pt''' ) , map_location=UpperCamelCase_ )
loaded(inputs_dict['''input_ids'''].to(UpperCamelCase_ ) , inputs_dict['''attention_mask'''].to(UpperCamelCase_ ) )
@require_torch
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
lowercase__ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
lowercase__ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
lowercase__ = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , UpperCamelCase_ )
lowercase__ = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1E-4 ) )
| 43 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
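        # folding the JIT compilation time into start_time keeps it out of the eval runtime measured below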
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
| 43 | 1 |
from math import pi, sqrt, tan
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
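# e.g. surface_area_cone(3, 4): slant height sqrt(4**2 + 3**2) = 5, so area = pi * 3 * (3 + 5) = 24 * pi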
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
lowercase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
lowercase__ = (sidea + sidea + sidea) / 2
lowercase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
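# e.g. area_triangle_three_sides(3, 4, 5): semi-perimeter 6, sqrt(6 * 3 * 2 * 1) = 6.0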
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 43 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() )
if not params:
raise ValueError(
f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith('''.pt''' ):
lowercase__ = args.output + '''.pt'''
lowercase__ = OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
            lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.float16 )  # np.floataa is not a real dtype; float16 assumed for the checkpoint cast
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , args.output )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
lowerCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
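    # example invocation (script name and paths are illustrative):
    #   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan-tf-ckpt --output ./gptsan.pt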
| 43 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
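# e.g. a single image -> [[image]]; a flat list of frames -> [frames]; an already-batched list of clips passes through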
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = ['''pixel_values''']
def __init__( self: List[str] , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: List[str] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = size if size is not None else {'''shortest_edge''': 256}
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = offset
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowercase__ = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowercase__ = (size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[int, float] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
        lowercase__ = image.astype(np.floataa )  # note: np.floataa is not a real dtype; np.float32 is the assumed cast for rescaling
if offset:
lowercase__ = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Any , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowercase__ = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowercase__ = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowercase__ = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowercase__ = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowercase__ = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowercase__ = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_: Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = offset if offset is not None else self.offset
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase__ = make_batched(UpperCamelCase_ )
lowercase__ = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowercase__ = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
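# Minimal usage sketch (the class is instantiated directly; argument values shown are just the defaults):
#   processor = _a(size={'shortest_edge': 256}, crop_size={'height': 224, 'width': 224})
#   batch = processor(videos, return_tensors='pt')  # videos: list of clips, each a list of PIL frames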
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
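# e.g. [1, 2, 3] -> True (all unique); [1, 2, 2] -> False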
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if n == 0:
return 0
lowercase__ = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__ = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE ) )
return max_revue
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase__ = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__ = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
lowercase__ = max_revenue
return max_rev[n]
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowercase__ = [float('''-inf''' ) for _ in range(n + 1 )]
lowercase__ = 0
for i in range(1 , n + 1 ):
lowercase__ = max_rev[i]
for j in range(1 , i + 1 ):
lowercase__ = max(SCREAMING_SNAKE_CASE , prices[j - 1] + max_rev[i - j] )
lowercase__ = max_revenue_i
return max_rev[n]
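# e.g. bottom_up_cut_rod(4, [1, 5, 8, 9]) -> 10 (two pieces of length 2: 5 + 5)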
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if n < 0:
lowercase__ = f'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if n > len(SCREAMING_SNAKE_CASE ):
lowercase__ = (
'''Each integral piece of rod must have a corresponding price. '''
f'Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE )}'
)
raise ValueError(SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
lowercase__ = [6, 10, 12, 15, 20, 23]
lowercase__ = len(SCREAMING_SNAKE_CASE )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase__ = 36
lowercase__ = top_down_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = bottom_up_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
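# maps each submodule to the public names it exports; the heavy torch/tf model classes below are only
# registered when their backends are installed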
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 1 |
def _a ( SCREAMING_SNAKE_CASE = 10_00 ):
"""simple docstring"""
lowercase__ = 2**power
lowercase__ = str(SCREAMING_SNAKE_CASE )
lowercase__ = list(SCREAMING_SNAKE_CASE )
lowercase__ = 0
for i in list_num:
sum_of_num += int(SCREAMING_SNAKE_CASE )
return sum_of_num
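# e.g. solution(15) -> 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26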
if __name__ == "__main__":
lowerCAmelCase = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
lowerCAmelCase = solution(power)
print('Sum of the digits is: ', result)
| 43 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
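        # reference values from a known-good run; the assert below allows small numerical drift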
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 43 | 1 |
from __future__ import annotations
import math
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
def _a ( ):
"""simple docstring"""
lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
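    # 8 leaf scores, so the tree height is log2(8) == 3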
lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 |
from __future__ import annotations
import math
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Scores cannot be empty''' )
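    # base case: a leaf has been reached, return its static score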
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
def _a ( ):
"""simple docstring"""
lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
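    # per-variant architecture settings: depths, hidden sizes, mlp ratio,
    # crop percentage (and layer-scale init for the larger models)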
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 |
class _a :
def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = val
lowercase__ = None
lowercase__ = None
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowercase__ = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if root:
inorder(root.left , SCREAMING_SNAKE_CASE )
res.append(root.val )
inorder(root.right , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return arr
lowercase__ = Node(arr[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase__ = []
inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 43 | 1 |
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _a ( ):
"""simple docstring"""
from timeit import timeit
lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 |
lowerCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
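# note: 'j' and 'v' take the otherwise unused codes so decoding stays unambiguous
# (the classical Baconian cipher shares codes between i/j and u/v)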
lowerCAmelCase = {value: key for key, value in encode_dict.items()}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowercase__ = ''''''
for word in coded.split():
while len(SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase__ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = BertTokenizer
_lowercase : int = BertTokenizerFast
_lowercase : Tuple = True
_lowercase : Union[str, Any] = True
_lowercase : Tuple = filter_non_english
def lowerCamelCase_ ( self: List[Any] ) -> int:
"""simple docstring"""
super().setUp()
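        # write a tiny WordPiece vocabulary to disk for the tests below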
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = '''UNwant\u00E9d,running'''
lowercase__ = '''unwanted, running'''
return input_text, output_text
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase_ ( self: Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''UNwant\u00E9d,running'''
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# With lower casing
lowercase__ = self.get_tokenizer(do_lower_case=UpperCamelCase_ )
lowercase__ = self.get_rust_tokenizer(do_lower_case=UpperCamelCase_ )
lowercase__ = '''UNwant\u00E9d,running'''
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCamelCase_ ( self: Any ) -> List[str]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer()
lowercase__ = '''a\n\'ll !!to?\'d of, can\'t.'''
lowercase__ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowercase__ = {}
for i, token in enumerate(UpperCamelCase_ ):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' )
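        # out-of-vocabulary pieces fall back to the [UNK] token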
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ = tokenizer_r.encode_plus(
UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , )
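                # every offset must point back into the raw string; accented characters
                # collapse to a single span when do_lower_case strips accents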
lowercase__ = tokenizer_r.do_lower_case if hasattr(UpperCamelCase_ , '''do_lower_case''' ) else False
lowercase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = ['''的''', '''人''', '''有''']
lowercase__ = ''''''.join(UpperCamelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = True
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = False
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(UpperCamelCase_ )
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 43 |
import numpy as np
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    return 1 / (1 + np.exp(-SCREAMING_SNAKE_CASE ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
lowerCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
lowerCAmelCase = {'vinai/bartpho-syllable': 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
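        # extend the reduced vocab with entries from the monolingual dict file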
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '▁'
lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
lowerCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
lowerCAmelCase = {'vinai/bartpho-syllable': 1024}
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = monolingual_vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ = {}
lowercase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = cnt
cnt += 1
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__ = line.strip().split()[0]
lowercase__ = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowercase__ = len(self.fairseq_tokens_to_ids )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
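        # the SentencePiece processor itself is not picklable; keep its serialised proto instead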
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 43 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _a ( ):
"""simple docstring"""
lowercase__ = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=SCREAMING_SNAKE_CASE )
lowercase__ = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE )
# Let's go
lowercase__ = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 43 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _a :
_lowercase : int
_lowercase : Node | None = None
_lowercase : Node | None = None
def _a ( ):
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
def populate_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return output
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
def populate_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return output
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(SCREAMING_SNAKE_CASE )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ = 0
return output
def _a ( ): # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'In-order Traversal: {inorder(SCREAMING_SNAKE_CASE )}' )
print(f'Pre-order Traversal: {preorder(SCREAMING_SNAKE_CASE )}' )
print(f'Post-order Traversal: {postorder(SCREAMING_SNAKE_CASE )}' , '''\n''' )
print(f'Height of Tree: {height(SCREAMING_SNAKE_CASE )}' , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(SCREAMING_SNAKE_CASE ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(SCREAMING_SNAKE_CASE ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE , level=SCREAMING_SNAKE_CASE ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
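        # train for more epochs on multi-GPU so the accuracy threshold below is still met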
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# The base model scores 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 1 |
import math
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = 2
lowercase__ = int(math.sqrt(SCREAMING_SNAKE_CASE ) ) # Size of every segment
lowercase__ = [True] * (end + 1)
lowercase__ = []
while start <= end:
if temp[start] is True:
in_prime.append(SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , SCREAMING_SNAKE_CASE ):
lowercase__ = False
start += 1
prime += in_prime
lowercase__ = end + 1
lowercase__ = min(2 * end , SCREAMING_SNAKE_CASE )
while low <= n:
lowercase__ = [True] * (high - low + 1)
for each in in_prime:
lowercase__ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(SCREAMING_SNAKE_CASE , high + 1 , SCREAMING_SNAKE_CASE ):
lowercase__ = False
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
lowercase__ = high + 1
lowercase__ = min(high + end , SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
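# A minimal cross-check sketch, not part of the original row: a plain one-pass sieve
# that the segmented version above should agree with for small n (the helper name
# below is my own).
def _plain_sieve(n: int) -> list[int]:
    # classic Sieve of Eratosthenes over [2, n]
    flags = [True] * (n + 1)
    primes = []
    for p in range(2, n + 1):
        if flags[p]:
            primes.append(p)
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return primes

# e.g. _plain_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]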
| 43 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
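# Note: each wrapper above only swaps in the mT5 model type and config class; the
# TF T5 implementations are reused unchanged, since mT5 shares T5's architecture and
# differs mainly in tokenizer and pretraining corpus.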
| 43 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class _a ( UpperCamelCase__ ):
_lowercase : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''audio''': Audio()} )
_lowercase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
_lowercase : str = "audio"
_lowercase : str = "labels"
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , UpperCamelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
lowercase__ = copy.deepcopy(self )
lowercase__ = self.label_schema.copy()
lowercase__ = features[self.label_column]
lowercase__ = label_schema
return task_template
@property
def lowerCamelCase_ ( self: Tuple ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
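# Hedged usage sketch; the class and method names below come from the upstream
# `datasets` task API and are assumptions here, since the identifiers above are
# obfuscated:
# features = Features({"audio": Audio(), "labels": ClassLabel(names=["neg", "pos"])})
# task = AudioClassification(label_column="labels").align_with_features(features)
# after the call, task.label_schema["labels"] carries the dataset's real ClassLabel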
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
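# Hedged usage sketch of the helpers above (the original names were obfuscated away,
# so the ones below are placeholders):
# freeze_params(module)       # sets requires_grad=False on every parameter
# device = pick_device()      # "mps" when available (with a warning), else "cuda", else "cpu"
# show_image(array)           # imshow with both axes hidden
# stamp = timestamp_now()     # e.g. "14:05:09"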
| 43 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''swinv2'''
_lowercase : Tuple = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple=224 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: int=3 , UpperCamelCase_: List[Any]=96 , UpperCamelCase_: str=[2, 2, 6, 2] , UpperCamelCase_: Dict=[3, 6, 12, 24] , UpperCamelCase_: List[str]=7 , UpperCamelCase_: Optional[int]=4.0 , UpperCamelCase_: int=True , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: Union[str, Any]=1E-5 , UpperCamelCase_: Optional[int]=32 , **UpperCamelCase_: List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(UpperCamelCase_ )
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
lowercase__ = (0, 0, 0, 0)
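# Hedged illustration of the derived attribute set at the end of __init__ above: with
# the defaults (embed_dim=96, depths=[2, 2, 6, 2]) the channel dimension after the
# last stage is int(96 * 2 ** (4 - 1)) = 768, exposed as hidden_size for
# VisionEncoderDecoderModel compatibility.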
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
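# e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so
# seq_length = ceil((1 - 0.6) * 226) = ceil(90.4) = 91 kept tokens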
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = BioGptTokenizer
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowercase__ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
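# each merges entry is "<token_a> <token_b> <score>"; the trailing empty string makes
# the '\n'.join(...) below end the written file with a newline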
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = '''lower newer'''
lowercase__ = '''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase__ = '''lower'''
lowercase__ = ['''low''', '''er</w>''']
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokens + ['''<unk>''']
lowercase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 43 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
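# An assumption-free cross-check using the standard library: base64.b16encode and
# base64.b16decode implement the same RFC 3548/4648 uppercase-hex coding these
# helpers target.
import base64

assert base64.b16encode(b"Hello") == b"48656C6C6F"
assert base64.b16decode(b"48656C6C6F") == b"Hello"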
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from heapq import heappop, heappush
import numpy as np
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase__ , lowercase__ = grid.shape
lowercase__ = [-1, 1, 0, 0]
lowercase__ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase__ , lowercase__ = [(0, source)], set()
lowercase__ = np.full((rows, cols) , np.inf )
lowercase__ = 0
lowercase__ = np.empty((rows, cols) , dtype=SCREAMING_SNAKE_CASE )
lowercase__ = None
while queue:
((lowercase__) , (lowercase__)) = heappop(SCREAMING_SNAKE_CASE )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase__ = []
while (x, y) != source:
path.append((x, y) )
lowercase__ , lowercase__ = predecessors[x, y]
path.append(SCREAMING_SNAKE_CASE ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase__ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(SCREAMING_SNAKE_CASE , (dist + 1, (nx, ny)) )
lowercase__ = dist + 1
lowercase__ = (x, y)
return np.inf, []
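# Hedged usage sketch (illustrative grid, not from the original row): with
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]]), source (0, 0) and
# destination (2, 0), the search above should return distance 6.0 and the
# seven-cell L-shaped path around the blocked middle row.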
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
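# Hedged note on expected behaviour: open knight's tours exist for n == 1 and n >= 5,
# so the ValueError above fires for board sizes 2, 3 and 4.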
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ) -> None:
"""simple docstring"""
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 43 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
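# classifier-free guidance pays off only when guidance_scale > 1.0; when the flag is
# set, _encode_image below doubles the batch with zeroed embeddings as the
# unconditional half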
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance: # the flag is a bool, so the previous "is not None" test was always true
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 1 |
from __future__ import annotations
from collections import namedtuple
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
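# Hedged examples of the intended API, solving P = V * I for whichever argument is
# passed as 0 (the function name is assumed; the definition above is obfuscated):
# electric_power(voltage=0, current=2, power=4)  -> result(name='voltage', value=2.0)
# electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)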
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
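# the _LazyModule above defers the heavy torch/tensorflow imports: attributes are
# resolved on first access via the import-structure mapping assembled in the
# try/except blocks, so merely importing the package stays cheap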
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
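# The shared `UNetBlockTesterMixin.test_output` follows this common pattern
# (a sketch of the idea, not the verbatim implementation): build the block from
# `prepare_init_args_and_inputs_for_common`, run one forward pass on the dummy
# input, and compare a fixed slice of the first output feature map against the
# nine hard-coded values, roughly:
#   output_slice = output[0, -1, -3:, -3:].flatten()
#   assert torch.allclose(output_slice.cpu(), torch.tensor(expected_slice), atol=5e-3)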
| 43 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self: Tuple ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = self.dummy_uncond_unet
lowercase__ = ScoreSdeVeScheduler()
lowercase__ = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
sde_ve.to(UpperCamelCase_ )
sde_ve.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=UpperCamelCase_ ).images
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=UpperCamelCase_ , return_dict=UpperCamelCase_ )[
0
]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
lowercase__ = '''google/ncsnpp-church-256'''
lowercase__ = UNetaDModel.from_pretrained(UpperCamelCase_ )
lowercase__ = ScoreSdeVeScheduler.from_pretrained(UpperCamelCase_ )
lowercase__ = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
sde_ve.to(UpperCamelCase_ )
sde_ve.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.manual_seed(0 )
lowercase__ = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=UpperCamelCase_ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 43 |
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE ) == 26
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
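# Illustrative usage: all three variants agree, e.g.
#   is_pangram("Pack my box with five dozen liquor jugs")  -> True
#   is_pangram_faster("hello world")                       -> False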
def benchmark():
    """simple docstring"""
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        # span-style values: one (start, end) pair per position
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
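# Minimal usage sketch (illustrative values):
#   padding_tensor([[1, 2], [3]], -1, "right", 4) -> [[1, 2, -1, -1], [3, -1, -1, -1]]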
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ord(SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE )
if cat.startswith('''P''' ):
return True
return False
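# Illustrative: the check above returns True for "," (ASCII punctuation range) and
# for "¿" (Unicode category Po), and False for "a" and " ".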
@dataclass
class _a ( UpperCamelCase__ ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        """simple docstring"""
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
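# Usage sketch (illustrative): each feature is a dict carrying `input_ids`,
# `entity_ids`, `ner_tags`, `original_entity_spans`, and optionally `labels`;
# `DataCollatorMixin` dispatches `collator(features)` to `torch_call` above when
# `return_tensors == "pt"`:
#   collator = _a(tokenizer=tokenizer, padding="longest")
#   batch = collator(features)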
| 43 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
def __init__( self: int , UpperCamelCase_: int , UpperCamelCase_: str=13 , UpperCamelCase_: int=7 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=99 , UpperCamelCase_: int=32 , UpperCamelCase_: Optional[Any]=5 , UpperCamelCase_: Any=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Union[str, Any]=512 , UpperCamelCase_: str=16 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: Tuple=None , ) -> Any:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] , ) -> Tuple:
"""simple docstring"""
lowercase__ = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] , ) -> Tuple:
"""simple docstring"""
lowercase__ = True
lowercase__ = True
lowercase__ = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowercase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_lowercase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
_lowercase : Any = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Optional[int] = False
_lowercase : List[Any] = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = LlamaModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = input_dict['''input_ids''']
lowercase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
lowercase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = '''single_label_classification'''
lowercase__ = input_dict['''input_ids''']
lowercase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
lowercase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = '''multi_label_classification'''
lowercase__ = input_dict['''input_ids''']
lowercase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
lowercase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ = LlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ids_tensor([1, 10] , config.vocab_size )
lowercase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ = LlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
lowercase__ = original_model(UpperCamelCase_ ).last_hidden_state
lowercase__ = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ = {'''type''': scaling_type, '''factor''': 10.0}
lowercase__ = LlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
lowercase__ = scaled_model(UpperCamelCase_ ).last_hidden_state
lowercase__ = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
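# Background (sketch): with rotary embeddings, the angle for dimension pair i at
# position p is p * base ** (-2 * i / head_dim). "Linear" scaling replaces p with
# p / factor everywhere, while "dynamic" (NTK-style) scaling instead enlarges the
# base once the sequence grows past the original max_position_embeddings, which is
# why only the dynamic variant matches the unscaled model on the short input above.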
@require_torch
class _a ( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowercase__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase__ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowercase__ = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowercase__ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowercase__ = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowercase__ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowercase__ = model(torch.tensor(UpperCamelCase_ ) )
lowercase__ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase__ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowercase__ = '''Simply put, the theory of relativity states that '''
lowercase__ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowercase__ = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowercase__ = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowercase__ = model.generate(UpperCamelCase_ , max_new_tokens=64 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowercase__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 43 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
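# Usage sketch (names are illustrative): this subclass drops in wherever a
# Seq2SeqTrainer is used, adding `eval_examples` and `post_process_function` to
# the constructor and forwarding generation kwargs through evaluate/predict:
#   trainer = _a(model=model, args=training_args, train_dataset=train_dataset,
#                eval_dataset=eval_dataset, eval_examples=raw_eval_examples,
#                post_process_function=post_processing_function)
#   metrics = trainer.evaluate(max_length=64, num_beams=4)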
| 43 | 1 |
def greatest_common_divisor(x, y):
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm(x, y):
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution(n=20):
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
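# Known reference values (Project Euler problem 5): solution(10) == 2520 and
# solution(20) == 232792560.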
if __name__ == "__main__":
print(f"""{solution() = }""")
| 43 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
lowercase__ = OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
            lowercase__ = '''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
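# Example invocation (script name and paths are illustrative):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_ckpt --output gptsan.pt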
| 43 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = MvpTokenizer
_lowercase : Tuple = MvpTokenizerFast
_lowercase : Dict = True
_lowercase : str = filter_roberta_detectors
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def lowerCamelCase_ ( self: str , **UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase_: int ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: str , UpperCamelCase_: Dict ) -> Tuple:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self: int ) -> List[str]:
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
    def default_tokenizer_fast( self: Tuple ) -> Dict:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase__ = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(UpperCamelCase_ , max_length=len(UpperCamelCase_ ) , padding=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that special tokens are reset
@require_torch
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , UpperCamelCase_ )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertNotIn('''labels''' , UpperCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase_ )
@require_torch
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(text_target=UpperCamelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(
['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
lowercase__ = ['''A long paragraph for summarization.''']
lowercase__ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ , return_tensors='''pt''' )
lowercase__ = inputs['''input_ids''']
lowercase__ = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = '''A, <mask> AllenNLP sentence.'''
lowercase__ = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
lowercase__ = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 43 |
from __future__ import annotations
def all_unique(collection):
    """Return True when every element of the collection is distinct."""
    return len(set(collection)) == len(collection)
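# Example:
#     all_unique([1, 2, 3])  -> True
#     all_unique([1, 2, 2])  -> False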
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'


def convert_to_2d(x, y, z, scale, distance):
    """Project a 3D point onto the 2D plane using simple perspective division."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values())}'
        raise TypeError(msg)
    # Points farther away (larger z) are pulled toward the origin.
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x, y, z, axis, angle):
    """Rotate the point (x, y, z) about the given axis by `angle` degrees."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f'{list(input_variables.values())}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
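# _import_structure maps each submodule to the public names it provides;
# _LazyModule (registered at the bottom of the file) performs the actual
# imports lazily, on first attribute access.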
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'


class TFMTaModel(TFTaModel):
    model_type = 'mt5'
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = 'mt5'
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = 'mt5'
    config_class = MTaConfig
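# MT5 reuses the T5 architecture unchanged; only `model_type` and the config
# class differ, so each class is a thin subclass of its T5 parent.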
| 43 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng across the local devices for pmap
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        # keyword arguments follow the Flax inpaint pipeline's __call__ signature
        output = pipeline(
            prompt_ids=prompt_ids, mask=processed_masks, masked_image=processed_masked_images,
            params=params, prng_seed=prng_seed, num_inference_steps=num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 43 | 1 |
def merge_sort(collection):
    """Sort by repeatedly pulling the current min and max off the list."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
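# Example: merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]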
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 43 |
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height):
    """Return the optimal score for the player to move in a full binary game tree."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
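    # Expected output: 65. Depth-2 maxima over leaf pairs are (90, 33, 65, 34423),
    # depth-1 minima are (33, 65), and the maximizing root picks 65.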
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert `val` into the subtree rooted at this node, keeping BST order."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Left-root-right traversal; appends values to `res` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort `arr` by inserting into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
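    # Expected output: [1, 2, 3, 9, 10, 13, 14]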
| 43 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mctct"""] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word):
    """Encode a word (letters and spaces only) with the Baconian cipher."""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded):
    """Decode a Baconian-cipher string made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
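# Example round trip:
#     encode('hello') == 'AABBBAABAAABABAABABAABBAB'
#     decode('AABBBAABAAABABAABABAABBAB') == 'hello'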
| 43 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def convert_state_dict(state_dict) -> dict:
    """Rename original RWKV checkpoint keys to the Transformers naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att', r'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn', r'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
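# Example mapping: 'blocks.0.att.time_mix_k' -> 'rwkv.blocks.0.attention.time_mix_key'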
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
) -> None:
    """Download an RWKV checkpoint, convert it and save it in Transformers format."""
    # 1. Build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.')
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 1 |
import numpy as np
def sigmoid(vector):
    """Apply the logistic sigmoid 1 / (1 + e^-x) element-wise."""
return 1 / (1 + np.exp(-vector ))
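# Example: sigmoid(np.array([-1.0, 1.0, 2.0]))
#          -> array([0.26894142, 0.73105858, 0.88079708])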
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the (non-fast) BlenderbotSmall tokenizer."""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
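# Note: in the BPE merges above, '@@' marks a non-final subword piece, so
# 'apte' tokenizes to ['ap@@', 'te'] once the 'a p' / 'ap t' merges apply.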
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token)} \n')
        return out_vocab_file, out_monolingual_vocab_file
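# Usage sketch (assumes the pretrained vinai/bartpho-syllable files are available;
# the Vietnamese sentence is illustrative):
#     tokenizer = BartphoTokenizer.from_pretrained('vinai/bartpho-syllable')
#     ids = tokenizer('Chúng tôi là những nghiên cứu viên.')['input_ids']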
| 43 | 0 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('CPU' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
UpperCamelCase = [mem.copy() for i in range(4 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('GPU' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('Model' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
UpperCamelCase = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
cpu_targs.append(A_ )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
UpperCamelCase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) , Write(A_ ) )
self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(A_ ):
UpperCamelCase = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
first_animations.append(GrowFromCenter(A_ , run_time=1 ) )
UpperCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(*A_ )
self.wait()
| 3 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace `original_name` in `key` with `new_name`, shifting the block index by `offset`."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
def rename_keys(state_dict):
    """Rename original PoolFormer checkpoint keys to the Transformers naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
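# Example mapping: 'network.0.0.mlp.fc1.weight'
#   -> 'poolformer.encoder.block.0.0.output.conv1.weight'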
def prepare_img():
    """Fetch the standard COCO cats image used to verify conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert a PoolFormer checkpoint to the Transformers format and verify its logits."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''dpr'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase="absolute" , _lowercase = 0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
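# Illustrative usage (a sketch; assumes `transformers` is importable so that
# `PretrainedConfig` resolves, and uses the class and parameter names defined above):
#   config = UpperCAmelCase_(projection_dim=128)
#   print(config.projection_dim)  # -> 128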
| 5 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
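# The three thin wrappers above mirror TFTaModel, TFTaForConditionalGeneration and
# TFTaEncoderModel: each simply re-tags the underlying T5 implementation with the
# `mt5` model type and MTaConfig. Note that all three are emitted under the same
# name `_a`, so only the last definition remains bound at module scope.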
| 43 | 0 |
from __future__ import annotations
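# Worked example for the helper below: median_of_two_arrays([1, 3], [2]) sorts the
# combined values to [1, 2, 3]; the odd total length selects the middle element, 2,
# while an even total length averages the two middle values instead.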
def median_of_two_arrays( numsa: list[float] , numsb: list[float] ):
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
| 6 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
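# Note: every helper above is emitted under the name `_a`, so only the final
# definition (the timestamp helper, returning e.g. "14:03:27") stays reachable at
# module scope; the parameter-freezing, device-picking and image-display helpers
# are shadowed by it.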
| 43 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_A = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = datetime.now()
_A = current_time.strftime('%H:%M:%S' )
return timestamp
| 7 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
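        # the per-patch noise created below drives ViTMAE's random masking; a fixed
        # array (together with the seed above) makes the two forward passes comparable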
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : int = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) )
__A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__A : str = tensor_value
__A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
__A : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
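    # Example invocation (illustrative; the script name and output path are
    # placeholders, the repo id comes from the help text above):
    #   python convert_roberta_prelayernorm_checkpoint.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta_prelayernorm_dump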
| 8 |
def base16_encode( data ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data ):
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
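    # Round-trip sketch (illustrative values):
    assert base16_encode(b"Hello" ) == "48656C6C6F"
    assert base16_decode("48656C6C6F" ) == b"Hello"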
| 43 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
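# Project Euler 59: the ciphertext (a comma-separated list of byte values) was
# XOR-encrypted with a repeating three-letter lowercase key; the helpers below
# brute-force every candidate key and keep only decryptions whose characters are
# all printable.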
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext , key ) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename = "p059_cipher.txt" ) -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='utf-8' )
    ciphertext = [int(number ) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f'{solution() = }')
| 9 |
from __future__ import annotations
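# Backtracking knight's tour: get_valid_pos enumerates the eight L-shaped moves
# from a square that stay on the n x n board, and open_knight_tour_helper extends
# a partial tour recursively, resetting a square to 0 whenever it backtracks.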
def get_valid_pos( position , n ):
    """simple docstring"""
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board ):
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board , pos , curr ):
    """simple docstring"""
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n ):
    """simple docstring"""
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
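    # Usage sketch: open_knight_tour(5) returns a 5x5 board whose entries are the
    # move numbers 1..25 of an open tour; sizes 2-4 admit no tour and raise ValueError.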
| 43 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
def __init__( self : str , _A : Tuple , _A : Tuple=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : Optional[int]=True , _A : Any=99 , _A : str=32 , _A : List[str]=5 , _A : List[Any]=4 , _A : Tuple=37 , _A : Dict="gelu" , _A : int=0.1 , _A : List[Any]=0.1 , _A : Dict=512 , _A : int=16 , _A : List[Any]=2 , _A : Union[str, Any]=0.02 , _A : int=3 , _A : Union[str, Any]=4 , _A : List[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = self.vocab_size - 1
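        # OpenAI GPT ships no dedicated padding token, so the tester reuses the
        # last vocabulary id as pad_token_id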
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , *_A : List[str] ):
_UpperCamelCase = OpenAIGPTModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , head_mask=_A )
_UpperCamelCase = model(_A , token_type_ids=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[Any] , _A : int , _A : str , _A : int , *_A : Optional[Any] ):
_UpperCamelCase = OpenAIGPTLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , *_A : List[str] ):
_UpperCamelCase = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : Optional[int] , _A : Union[str, Any] , _A : List[str] , _A : Optional[Any] , *_A : List[Any] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Optional[Any] , _A : Tuple , _A : Optional[int] , _A : List[Any] , _A : Optional[Any] , _A : Optional[int] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase_ ( self : Dict , _A : int , _A : Any , _A : Dict=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , )
_UpperCamelCase = inputs_dict['''labels''']
_UpperCamelCase = inputs_dict['''labels''']
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = OpenAIGPTModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , n_embd=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = OpenAIGPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(_A )
_UpperCamelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_A ) # the president is
_UpperCamelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCamelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 10 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
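                # classifier-free guidance: push the prediction away from the
                # unconditional branch in proportion to `guidance_scale`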
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 0 |
'''simple docstring'''
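# Worked examples for the isogram check below (illustrative):
#   is_isogram("Uncopyrightable") -> True   (all fifteen letters are distinct)
#   is_isogram("allowance")       -> False  ("a" and "l" repeat)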
def is_isogram(string):
    """simple docstring"""
    if not all(x.isalpha() for x in string):
        raise ValueError('''String must only contain alphabetic characters.''')
    _a = sorted(string.lower())
    return len(_a) == len(set(_a))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 11 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
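# Minimal usage sketch (mirrors the docstring examples above; assumes `datasets`
# and `nltk` are installed):
#   google_bleu = datasets.load_metric("google_bleu")
#   results = google_bleu.compute(predictions=[["the", "cat"]], references=[[["the", "cat"]]])
#   # an exact match yields results["google_bleu"] == 1.0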
| 43 | 0 |
def UpperCamelCase ( bin_string ) -> str:
    '''simple docstring'''
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
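    # Worked example (0b111011 == 0o73):
    assert UpperCamelCase("111011" ) == "73"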
| 12 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
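# Each test case below pins one UNet block implementation together with the side of
# the network it exercises ('down' or 'mid') and checks a fixed slice of the block's
# output against hard-coded reference values.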
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 43 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
benchmark()
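    # Worked numeric check of both implementations:
    # distance([1, 2, 3], [4, 5, 6]) = sqrt(3**2 + 3**2 + 3**2) = sqrt(27) ~= 5.196
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12
    assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12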
| 13 |
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
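    # Sanity check: all three implementations must agree on a pangram and a
    # non-pangram alike.
    for text in ("The quick brown fox jumps over the lazy dog", "hello world"):
        assert is_pangram(text) == is_pangram_faster(text) == is_pangram_fastest(text)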
| 43 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
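
# Hand verification of the result: the well-known answer to this search
# (Project Euler 46) is 5777; the check below confirms that no decomposition
# prime + 2*i*i exists for it.
if __name__ == "__main__":
    candidate = 5777
    assert not is_prime(candidate) and candidate % 2 == 1
    assert all(
        not is_prime(candidate - 2 * i * i)
        for i in range(1, int((candidate / 2) ** 0.5) + 1)
    )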
| 14 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """simple docstring"""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
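
# Minimal usage sketch for `padding_tensor` above (values chosen arbitrarily):
# with "right" padding, the tail of each row is filled with the pad value.
if __name__ == "__main__":
    assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]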
| 43 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 15 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 43 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
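
# Derived attribute sketch (see the comment in __init__ above): with the
# default embed_dim=96 and four stages, the last stage has 96 * 2**3 = 768
# channels, so SwinConfig().hidden_size == 768 and
# SwinConfig().stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].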
| 16 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
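# Example invocation (hypothetical paths and script name; a real GPTSAN
# TensorFlow checkpoint directory containing parameters.json is required):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan.pt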
| 43 | 0 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
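    # Worked example: for dimensions [30, 35, 15, 5, 10, 20, 25] the classic
    # CLRS answer is 15125 scalar multiplications via ((A1(A2A3))((A4A5)A6)).
    m, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert m[1][6] == 15125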
| 17 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series"))
_SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 19 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 43 | 0 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
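
# Deterministic spot check (no interactive input needed):
if __name__ == "__main__":
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]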
| 20 |
from __future__ import annotations
import math
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
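# Worked illustration for the scores above (height = log2(8) = 3):
#   depth 2 (max over leaf pairs): 90, 33, 65, 34423
#   depth 1 (min): 33, 65; depth 0 (max): 65
# So main() prints "Optimal value : 65".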
| 43 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Returns the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
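# Example trace (illustrative) for the classic input [2, 3, -2, 4]:
#   i=1: max=6, min=3; i=2 (negative, swap then update): max=-2, min=-12; i=3: max=4.
# The best product seen is 6, from the subarray [2, 3].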
| 21 |
class Node:
    """A node of a binary search tree (BST)."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal appending values to `res`."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sorts `arr` by building a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
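# For the driver input above, the in-order traversal of the resulting BST
# (rooted at 10) yields the sorted list [1, 2, 3, 9, 10, 13, 14].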
| 43 | 0 |
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Searches `grid` for the cheapest path from `init` to `goal`, returning
    the path and the action map used to reconstruct it."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
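# Note (illustrative): since f = g + heuristic[x][y] and the driver above builds an
# obstacle-penalized Manhattan-distance heuristic, this behaves like an A*-style
# search; an all-zero heuristic would reduce it to uniform-cost (Dijkstra-like) search.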
| 22 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes a word into its Baconian-cipher representation."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decodes a Baconian-cipher string (groups of five A/B characters) back to text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
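# Round-trip illustration, consistent with the table above:
#   encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#   decode("AABBBAABAAABABAABABAABBAB") -> "hello"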
| 43 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MRA model."""

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
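# Usage sketch (illustrative): default construction mirrors a BERT-base-sized
# encoder plus the MRA-specific attention settings shown above.
#   config = MraConfig()   # hidden_size=768, num_hidden_layers=12
#   config.approx_mode     # -> "full"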
| 23 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Applies the logistic sigmoid 1 / (1 + e^(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
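# Illustration: sigmoid(0) = 0.5, and the function is applied elementwise, e.g.
#   sigmoid(np.array([-1.0, 0.0, 1.0])) -> array([0.26894142, 0.5, 0.73105858])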
| 43 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 24 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """SentencePiece-based BARTpho tokenizer with a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Builds model inputs by adding <s> ... </s> (and </s></s> between pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Returns a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the reduced fairseq vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 43 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method, wrapping the posterior distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder (VAE) with KL loss for encoding images into latents
    and decoding latent representations into images."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 25 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """We will verify our results on a COCO image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 26 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
| 43 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """Computes all-pairs shortest paths on a `v` x `v` weight matrix `graph`."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 27 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 43 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
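# Example invocation of this conversion script (hypothetical file paths; the
# script filename is assumed, the flags come from the argparse definitions above):
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path generator.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5-hifigan-hf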
| 28 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
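# Minimal usage sketch for the helpers above. Note that all four share the
# obfuscated name `_a`, so each definition shadows the previous one and only
# the timestamp helper is reachable at module level:
# print(_a())  # prints the current time, e.g. "14:03:27"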
| 43 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = CanineTokenizer
a__: Tuple = False
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self ):
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
lowerCamelCase_ = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCamelCase_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertIn('''token_type_ids''' , UpperCAmelCase )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
lowerCamelCase_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowerCamelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ = chr(0Xe_007 )
additional_special_tokens.append(UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ , lowerCamelCase_ = self.get_clean_sequence(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_005
lowerCamelCase_ = chr(UpperCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
lowerCamelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , input_encoded + special_token_id )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = chr(0Xe_005 )
lowerCamelCase_ = chr(0Xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(token_a[0] , UpperCAmelCase )
self.assertEqual(token_a[0] , UpperCAmelCase )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCAmelCase )
tokenizer.from_pretrained(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = [new_token_a]
lowerCamelCase_ = [new_token_a]
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ = 0Xe_007
lowerCamelCase_ = chr(UpperCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )]
lowerCamelCase_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''hello world'''
if self.space_between_special_tokens:
lowerCamelCase_ = '''[CLS] hello world [SEP]'''
else:
lowerCamelCase_ = input
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCAmelCase , [output, output.lower()] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase_ = '''a'''
lowerCamelCase_ = ord(UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
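# Character-level sanity check (a sketch grounded in the expected ids in the
# batch-encoding test above: 57344 = [CLS], 57345 = [SEP], every other id is
# simply ord(char)):
# tok = CanineTokenizer()
# tok("hi").input_ids  # -> [57344, 104, 105, 57345]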
| 29 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
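# Note on the `noise` argument threaded through these tests (a sketch): ViTMAE
# derives its random patch mask from per-patch noise, so fixing the noise makes
# the mask — and hence the outputs — deterministic across calls:
# noise = np.random.uniform(size=(1, (224 // 16) ** 2))  # 196 patches at 224px with 16px patches
# outputs = model(pixel_values, noise=noise)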
| 43 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__a = datasets.logging.get_logger(__name__)
__a = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
__a = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
__a = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
__a = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/google-research/bleurt''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/google-research/bleurt'''] ,reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
UpperCAmelCase_ : Tuple = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
UpperCAmelCase_ : Optional[int] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
UpperCAmelCase_ : Tuple = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
UpperCAmelCase_ : List[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
UpperCAmelCase_ : Any = score.BleurtScorer(os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Optional[Any] = self.scorer.score(references=_SCREAMING_SNAKE_CASE ,candidates=_SCREAMING_SNAKE_CASE )
return {"scores": scores}
| 30 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
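# Round-trip sketch (illustrative; both helpers above share the obfuscated name
# `_a`, so only the decoder survives at module level):
# _a("68656C6C6F")  # -> b"hello"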
| 43 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
try:
with open(__UpperCAmelCase , 'rb' ) as flax_state_f:
SCREAMING_SNAKE_CASE_ = from_bytes(__UpperCAmelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__UpperCAmelCase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> Dict:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    SCREAMING_SNAKE_CASE_ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        SCREAMING_SNAKE_CASE_ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = flatten_dict(__UpperCAmelCase , sep='.' )
SCREAMING_SNAKE_CASE_ = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE_ = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE_ = jnp.transpose(__UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE_ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
SCREAMING_SNAKE_CASE_ = '.'.join(__UpperCAmelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE_ = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE_ = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__UpperCAmelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
' use it for predictions and inference.' )
return pt_model
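# Usage sketch (hypothetical path; in transformers these two helpers are
# `load_flax_checkpoint_in_pytorch_model` and `load_flax_weights_in_pytorch_model`,
# and the second obfuscated definition shadows the first here):
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")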
| 31 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = position
lowercase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowercase__ = []
for position in positions:
lowercase__ , lowercase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ , lowercase__ = position
if board[y][x] == 0:
lowercase__ = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
lowercase__ = 0
return False
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowercase__ = 0
    lowercase__ = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
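# Example (illustrative; the call sites above show the original helper names
# `get_valid_pos`, `is_complete`, `open_knight_tour_helper` and `open_knight_tour`):
# board = open_knight_tour(5)  # 5x5 matrix whose entries give the 1-based visit order
# for row in board:
#     print(row)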
| 43 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCAmelCase = model_type_to_module_name(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCAmelCase = importlib.import_module('''transformers''' )
if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return None
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = get_file_from_repo(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader:
return json.load(SCREAMING_SNAKE_CASE_ )
class __UpperCamelCase :
def __init__( self ):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_UpperCamelCase )
def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ):
_UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase )
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = config_dict.get('''image_processor_type''' , _UpperCamelCase )
_UpperCAmelCase = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , _UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# It could be in `config.image_processor_type``
_UpperCAmelCase = getattr(_UpperCamelCase , '''image_processor_type''' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_UpperCAmelCase = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_UpperCAmelCase = image_processor_class_from_name(_UpperCamelCase )
_UpperCAmelCase = image_processor_auto_map is not None
_UpperCAmelCase = image_processor_class is not None or type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
_UpperCAmelCase = resolve_trust_remote_code(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if has_remote_code and trust_remote_code:
_UpperCAmelCase = get_class_from_dynamic_module(
_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
_UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(_UpperCamelCase )]
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
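# Usage sketch (network access assumed; the class above is transformers'
# AutoImageProcessor, shown here under the obfuscated name `__UpperCamelCase`):
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# # resolves to ViTImageProcessor via the ("vit", "ViTImageProcessor") mapping entry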
| 32 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
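# End-to-end usage sketch for this image-to-3D prior pipeline. The checkpoint id
# and file names are illustrative assumptions, not fixed by this module (upstream
# the class is published as ShapEImg2ImgPipeline):
#
# import torch
# import PIL.Image
# from diffusers import ShapEImg2ImgPipeline
#
# pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")
# image = PIL.Image.open("input.png")
# frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64).images[0]
# frames[0].save("view_0.png")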
| 43 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
snake_case__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
snake_case__ , snake_case__ = emb.weight.shape
snake_case__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
snake_case__ = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' )
snake_case__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
snake_case__ = mam_aaa['''model''']
remove_ignore_keys_(__lowerCAmelCase )
snake_case__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
snake_case__ = MaMaaaConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
snake_case__ = state_dict['''decoder.embed_tokens.weight''']
snake_case__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
snake_case__ = make_linear_from_emb(model.model.shared )
return model
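# Example invocation of this conversion script (the script file name and both
# paths are placeholders):
#
# python convert_m2m100_original_checkpoint_to_pytorch.py \
#     /path/to/fairseq/model.pt ./m2m100-converted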
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase__ : Optional[int] = parser.parse_args()
    lowerCamelCase__ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 33 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
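# Minimal end-to-end usage (mirrors the docstring examples above):
#
# import datasets
# google_bleu = datasets.load_metric("google_bleu")
# hyp = ["the", "cat", "sat", "on", "the", "mat"]
# ref = ["the", "cat", "is", "on", "the", "mat"]
# print(google_bleu.compute(predictions=[hyp], references=[[ref]])["google_bleu"])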
| 43 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE_ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''esm'''
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_6 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Union[str, Any]:
super().__init__(pad_token_id=lowerCamelCase_ , mask_token_id=lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = emb_layer_norm_before
UpperCamelCase = token_dropout
UpperCamelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''')
UpperCamelCase = EsmFoldConfig()
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = EsmFoldConfig(**lowerCamelCase_)
UpperCamelCase = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
UpperCamelCase = get_default_vocab_list()
else:
UpperCamelCase = vocab_list
else:
UpperCamelCase = None
UpperCamelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , lowerCamelCase_):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = super().to_dict()
if isinstance(self.esmfold_config , lowerCamelCase_):
UpperCamelCase = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = None
A_ = True
A_ = False
A_ = False
A_ = False
A_ = 0
A_ = True
A_ = False
A_ = 128
A_ = None
def UpperCAmelCase__ ( self) -> Tuple:
if self.trunk is None:
UpperCamelCase = TrunkConfig()
elif isinstance(self.trunk , lowerCamelCase_):
UpperCamelCase = TrunkConfig(**self.trunk)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = asdict(self)
UpperCamelCase = self.trunk.to_dict()
return output
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 48
A_ = 1_024
A_ = 128
A_ = 32
A_ = 32
A_ = 32
A_ = 0
A_ = 0
A_ = False
A_ = 4
A_ = 128
A_ = None
def UpperCAmelCase__ ( self) -> Tuple:
if self.structure_module is None:
UpperCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCamelCase_):
UpperCamelCase = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
UpperCamelCase = self.sequence_state_dim // self.sequence_head_width
UpperCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
if self.dropout >= 0.4:
raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.')
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = asdict(self)
UpperCamelCase = self.structure_module.to_dict()
return output
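# Worked check of the head-width constraints above with the defaults:
# sequence_state_dim=1024 / sequence_head_width=32 -> 32 heads, and 32 * 32 == 1024;
# pairwise_state_dim=128 / pairwise_head_width=32 -> 4 heads, and 4 * 32 == 128;
# 128 is even and dropout=0 < 0.4, so the default TrunkConfig validates cleanly.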
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 384
A_ = 128
A_ = 16
A_ = 128
A_ = 12
A_ = 4
A_ = 8
A_ = 0.1
A_ = 8
A_ = 1
A_ = 2
A_ = 7
A_ = 10
A_ = 1E-8
A_ = 1E5
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return asdict(self)
def __snake_case ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
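# Illustrative construction (a sketch; EsmConfig and get_default_vocab_list are
# the upstream names of the configuration class and helper defined above):
#
# config = EsmConfig(vocab_size=33, is_folding_model=True)
# assert config.esmfold_config is not None   # default EsmFoldConfig filled in
# assert len(get_default_vocab_list()) == 33  # the 33-symbol ESM-2 vocabulary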
| 34 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
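# Each test above pins a 9-value slice of the block's output against hard-coded
# expectations produced with a fixed seed; a mismatch usually means the block's
# forward pass changed numerically. To run the suite (path is an assumption):
# python -m pytest tests/models/unet/test_unet_blocks.py -q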
| 43 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict=7 , _lowercase : List[Any]=3 , _lowercase : str=18 , _lowercase : Optional[int]=30 , _lowercase : List[Any]=4_00 , _lowercase : Any=True , _lowercase : Optional[int]=None , _lowercase : int=True , _lowercase : List[str]=None , _lowercase : int=True , _lowercase : List[str]=[0.5, 0.5, 0.5] , _lowercase : Dict=[0.5, 0.5, 0.5] , _lowercase : Union[str, Any]=False , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else {'''height''': 20, '''width''': 20}
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : int = do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Tuple = crop_size
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean
SCREAMING_SNAKE_CASE__ : int = image_std
SCREAMING_SNAKE_CASE__ : Optional[int] = do_reduce_labels
def lowercase__ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : str = Image.open(dataset[0]['''file'''] )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(dataset[1]['''file'''] )
return image, map
def a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Any = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[1]['''file'''] )
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[2]['''file'''] )
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = BeitImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = BeitImageProcessingTester(self )
@property
def lowercase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowercase , '''center_crop''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowercase )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Optional[int] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = []
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
SCREAMING_SNAKE_CASE__ : str = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def lowercase__ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
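# Note on the last test: with do_reduce_labels=True the processor shifts every
# class id down by one and remaps the background class (0) to the ignore index
# 255, which is why the observed label range widens from [0, 150] to [0, 255].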
| 35 |
def is_pangram(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def is_pangram_fastest(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
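# Quick sanity checks for the three variants (illustrative):
# >>> is_pangram() and is_pangram_faster() and is_pangram_fastest()
# True
# >>> is_pangram("My name is Unknown")
# False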
def benchmark():
    """simple docstring"""
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 0 |
import baseaa
def lowercase ( __A : str ) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("""utf-8""" ) )
def lowercase ( __A : bytes ) -> str:
'''simple docstring'''
return baseaa.baadecode(__A ).decode("""utf-8""" )
if __name__ == "__main__":
__lowercase : int = '''Hello World!'''
__lowercase : Union[str, Any] = baseaa_encode(test)
print(encoded)
__lowercase : Any = baseaa_decode(encoded)
print(decoded)
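    # Expected round trip for the fixed test string above:
    # b'SGVsbG8gV29ybGQh'
    # Hello World!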
| 36 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length ):
    """simple docstring"""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
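# Illustrative behavior of padding_tensor (values chosen for the example):
# padding_tensor([[1, 2], [3]], -1, "right", 4) -> [[1, 2, -1, -1], [3, -1, -1, -1]]
# With a tuple padding value, every position holds a pair, e.g. entity spans
# padded with (-1, -1).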
def is_punctuation(char ):
    """simple docstring"""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('''P''' ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        """simple docstring"""
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 43 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase_ ( ) -> Optional[Any]:
a__, a__ : Tuple = 9, 14 # noqa: F841
a__ : int = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a__ : str = defaultdict(__a )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
a__ : Union[str, Any] = mst(__a )
a__ : str = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
a__ : List[str] = tuple(answer[:2] )
a__ : Any = tuple(edge[::-1] )
assert edge in result or reverse in result
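# Note: Prim's algorithm with an adjacency list and linear minimum extraction
# runs in O(V^2); a binary-heap priority queue brings this down to O(E log V).
# The fixture above has V = 9 vertices and E = 14 edges.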
| 37 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
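# Typical driver code for this trainer (a sketch; the variables are illustrative
# and not defined in this module):
#
# metrics = trainer.evaluate(max_length=data_args.val_max_target_length, num_beams=data_args.num_beams)
# predictions = trainer.predict(predict_dataset, metric_key_prefix="test", max_length=128, num_beams=4)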
| 43 | 0 |
'''simple docstring'''
from math import pow
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , ) -> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
snake_case__ : List[str] = int(pow(__magic_name__ , __magic_name__ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
snake_case__ , snake_case__ : Union[str, Any] = backtrack(
__magic_name__ , __magic_name__ , current_number + 1 , __magic_name__ , __magic_name__ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
snake_case__ , snake_case__ : Optional[Any] = backtrack(
__magic_name__ , __magic_name__ , current_number + 1 , __magic_name__ , __magic_name__ )
return current_sum, solutions_count
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : int ) -> int:
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(__magic_name__ , __magic_name__ , 1 , 0 , 0 )[1] # Return the solutions_count
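# Worked example for the wrapper above (upstream it is called solve): with
# needed_sum=10 and power=2 it counts the ways to write 10 as a sum of distinct
# natural-number squares; 10 = 1**2 + 3**2 is the only one, so the answer is 1.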
if __name__ == "__main__":
import doctest
doctest.testmod()
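# Example (verified by hand): 13 has exactly one representation as a sum of unique
# squares, 4 + 9, so:
#
#   print(solve(13, 2))  # 1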
| 38 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Convert a TF GPTSAN checkpoint into a PyTorch state dict saved at ``args.output``."""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    torch_tensor = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
    torch.save(torch_tensor, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 43 | 0 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, empirically in the range 0.04 ... 0.06
        window_size: side length of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """Return the annotated image and the list of detected corners."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 39 |
from __future__ import annotations


def has_unique_chars(sequence) -> bool:
    """Return True if every element of ``sequence`` occurs at most once."""
    return len(set(sequence)) == len(sequence)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
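# Examples (illustrative):
#
#   print(has_unique_chars("abcdef"))   # True
#   print(has_unique_chars([1, 2, 2]))  # False -- works for any sequence of hashables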
| 43 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
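# --- Invocation sketch (illustrative; the flag values are assumptions, not from the
# original file) ---
#
#   python run_common_voice.py \
#     --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#     --dataset_config_name tr \
#     --output_dir ./wav2vec2-xlsr-demo \
#     --do_train --do_eval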
| 40 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
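# Usage sketch: with the lazy module installed in sys.modules, consumers import as usual
# and each heavy submodule above is only loaded on first attribute access, e.g.:
#
#   from transformers.models.convbert import ConvBertConfig, ConvBertTokenizer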
| 43 | 0 |
"""ChrF(++) metric for the `datasets` library, wrapping the sacrebleu implementation."""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    """ChrF and ChrF++ machine-translation metrics."""
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] ,reference_urls=[
'''https://github.com/m-popovic/chrF''',
] ,)
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 41 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 43 | 0 |
"""Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
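# Worked example (illustrative): hydrogen (~2.016 g/mol) vs. oxygen (~32.00 g/mol).
# Graham's law predicts H2 effuses about sqrt(32.00 / 2.016) ~ 3.98 times faster than O2:
#
#   print(effusion_ratio(2.016, 32.00))  # ~3.9841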
| 42 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for the player to move in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 43 | 0 |
"""Convert an old diffusers UNet checkpoint and config to the current format."""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
    key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
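# --- Invocation sketch (illustrative path values, not from the original file) ---
#
#   python convert_unet.py --repo_path ./old-unet-checkpoint --dump_path ./converted-unet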
| 44 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert ``val`` into the subtree rooted at this node, keeping BST order."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Append the values of the tree to ``res`` in sorted (in-order) order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by building a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 43 | 0 |
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
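# Note: the candidates 7, 19, 37, 61, ... are the differences of consecutive cubes,
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, so solution() counts how many such
# differences below max_prime are prime (e.g. 3**3 - 2**3 = 19, which is prime).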
| 45 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-ciphered string of 'A'/'B' groups separated by spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
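# Round-trip example:
#
#   coded = encode("flee at once")
#   print(decode(coded))  # "flee at once"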
| 43 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32,
        num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02,
        rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True,
        rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 46 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + exp(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
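# Example:
#
#   print(sigmoid(np.array([-1.0, 0.0, 2.0])))
#   # [0.26894142 0.5        0.88079708]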
| 43 | 0 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two initializers while ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicated initializers from an ONNX model and save the optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
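# Usage sketch (illustrative path):
#
#   optimized_path = remove_dup_initializers("model.onnx")
#   print(optimized_path)  # e.g. "optimized_model.onnx" written next to the input file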
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs with BART-style special tokens: <s> A </s> (</s> B </s>)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
        lowercase__ = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict:
"""simple docstring"""
lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(UpperCamelCase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
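# A minimal usage sketch for the tokenizer above (class and file names are
# assumptions; the obfuscated dump hides the real identifiers):
# tok = SentencePieceTokenizer(vocab_file="sentencepiece.bpe.model",
#                              monolingual_vocab_file="dict.txt")
# Pairs built by the build-inputs method follow the <s> A </s></s> B </s> layout,
# and save_vocabulary writes back both the SentencePiece model and the reduced vocab.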
| 43 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = CLIPTokenizer
snake_case__ :Optional[Any] = CLIPTokenizerFast
snake_case__ :Optional[Any] = True
snake_case__ :Dict = {}
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().setUp()
# fmt: off
lowerCAmelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__magic_name__ : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , **__magic_name__ : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowerCAmelCase__ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
@require_ftfy
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowerCAmelCase__ = tokenizer_s.tokenize(__magic_name__ )
lowerCAmelCase__ = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase__ = "xa\u0303y" + " " + "x\xe3y"
lowerCAmelCase__ = tokenizer_s.tokenize(__magic_name__ )
lowerCAmelCase__ = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase__ = tokenizer_s.tokenize(__magic_name__ )
lowerCAmelCase__ = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase__ = tokenizer_s.tokenize(__magic_name__ )
lowerCAmelCase__ = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
lowerCAmelCase__ = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowerCAmelCase__ = f""" {text}"""
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
lowerCAmelCase__ = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaises(__magic_name__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
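# The offset-mapping assertions above, in isolation (tokenizer checkpoint is an
# assumption; "hello" is a single 5-character token, so the second occurrence
# starts right after the space):
# enc = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# assert enc.offset_mapping[0] == (0, 5) and enc.offset_mapping[1] == (6, 11)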
| 48 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
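# Worked example (hypothetical key, offset=1): for
# key = "poolformer.encoder.2.3.mlp.fc1.weight" with original_name="mlp.fc1" and
# new_name="output.conv1", the segment "2.3.mlp.fc1" is rewritten to
# "block.1.3.output.conv1", giving "poolformer.encoder.block.1.3.output.conv1.weight".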
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 10_00
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 10_00)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
    lowercase__ = {int(k ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 1_28, 3_20, 5_12]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 1_92, 3_84, 7_68]
lowercase__ = 4.0
lowercase__ = 1E-6
lowercase__ = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
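    # Example invocation (script and checkpoint file names are assumptions):
    # python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
    #     --checkpoint_path poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12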
| 43 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = ""
a__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a__ : str = None # compression type in fsspec. ex: "gzip"
    a__ : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : Optional[int] , _lowercase : str = "" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , **_lowercase : Dict ):
super().__init__(self , **_lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase = fsspec.open(
_lowercase , mode='''rb''' , protocol=_lowercase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase = os.path.basename(self.file.path.split('''::''' )[0] )
__UpperCAmelCase = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase = None
@classmethod
def a ( cls : Tuple , _lowercase : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowercase ).lstrip('''/''' )
def a ( self : Dict ):
if self.dir_cache is None:
__UpperCAmelCase = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
__UpperCAmelCase = {f['''name''']: f}
def a ( self : Optional[int] , _lowercase : str ):
return self.file.open().read()
def a ( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[Any]=None , _lowercase : Any=True , _lowercase : Tuple=None , **_lowercase : Optional[Any] , ):
__UpperCAmelCase = self._strip_protocol(_lowercase )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "bz2"
a__ : List[str] = "bz2"
a__ : List[str] = ".bz2"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "gzip"
a__ : Tuple = "gzip"
a__ : Optional[int] = ".gz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : int = "lz4"
a__ : List[str] = "lz4"
a__ : Union[str, Any] = ".lz4"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = "xz"
a__ : Optional[Any] = "xz"
a__ : List[str] = ".xz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "zstd"
a__ : Any = "zstd"
a__ : Union[str, Any] = ".zst"
def __init__( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , _lowercase : int = DEFAULT_BLOCK_SIZE , **_lowercase : Any , ):
super().__init__(
fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCAmelCase = self.file.__enter__
class _UpperCAmelCase :
def __init__( self : int , _lowercase : Optional[int] ):
__UpperCAmelCase = file_
def __enter__( self : Tuple ):
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] , *_lowercase : int , **_lowercase : int ):
self._file.__exit__(*_lowercase , **_lowercase )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def a ( self : int ):
return next(self._file )
def __getattr__( self : Dict , _lowercase : Union[str, Any] ):
return getattr(self._file , _lowercase )
def fixed_enter(*_lowercase : Union[str, Any] , **_lowercase : Optional[Any] ):
return WrappedFile(_enter(*_lowercase , **_lowercase ) )
__UpperCAmelCase = fixed_enter
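# A minimal usage sketch of the compression filesystems above, using the chained
# URL syntax from the protocol comment (the remote file is hypothetical):
# import fsspec
# with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#     data = f.read()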
| 49 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase = logging.getLogger()
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ = parser.parse_args()
return args.f
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase__ = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'can\'t find {path}' )
return results
def _a ( ):
"""simple docstring"""
lowercase__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: int ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = 7 if get_gpu_count() > 1 else 2
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ = get_results(UpperCamelCase_ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
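# Every test above follows the same recipe: build the example-script argv, shell
# out with run_command(self._launch_args + testargs), then read the metrics the
# script wrote to {tmp_dir}/all_results.json via get_results() and assert on them.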
| 43 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int=None ):
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowerCamelCase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json()
lowerCamelCase__ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowerCAmelCase ):
lowerCamelCase__ = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict=None ):
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
    lowerCamelCase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json()
lowerCamelCase__ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowerCAmelCase ):
lowerCamelCase__ = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowerCamelCase__ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase , allow_redirects=__lowerCAmelCase )
lowerCamelCase__ = result.headers["""Location"""]
lowerCamelCase__ = requests.get(__lowerCAmelCase , allow_redirects=__lowerCAmelCase )
lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{artifact_name}.zip''' )
with open(__lowerCAmelCase , """wb""" ) as fp:
fp.write(response.content )
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str]=None ):
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = None
with zipfile.ZipFile(__lowerCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCAmelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowerCAmelCase ) as f:
for line in f:
lowerCamelCase__ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCamelCase__ = line[: line.index(""": """ )]
lowerCamelCase__ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCamelCase__ = line[len("""FAILED """ ) :]
failed_tests.append(__lowerCAmelCase )
elif filename == "job_name.txt":
lowerCamelCase__ = line
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCAmelCase )} for `errors` '''
F'''and {len(__lowerCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
lowerCamelCase__ = None
if job_name and job_links:
lowerCamelCase__ = job_links.get(__lowerCAmelCase , __lowerCAmelCase )
# A list with elements of the form (line of error, error, failed test)
lowerCamelCase__ = [x + [y] + [job_link] for x, y in zip(__lowerCAmelCase , __lowerCAmelCase )]
return result
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=None ):
lowerCamelCase__ = []
lowerCamelCase__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for p in os.listdir(__lowerCAmelCase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCAmelCase , job_links=__lowerCAmelCase ) )
return errors
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=None ):
lowerCamelCase__ = Counter()
counter.update([x[1] for x in logs] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCamelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    lowerCamelCase__ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__lowerCAmelCase ) )
return r
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCamelCase__ = test.split("""/""" )[2]
else:
lowerCamelCase__ = None
return test
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Any=None ):
lowerCamelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCamelCase__ = [x for x in logs if x[2] is not None]
lowerCamelCase__ = {x[2] for x in logs}
lowerCamelCase__ = {}
for test in tests:
lowerCamelCase__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCamelCase__ = sum(error_counts.values() )
if n_errors > 0:
lowerCamelCase__ = {"""count""": n_errors, """errors""": error_counts}
    lowerCamelCase__ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__lowerCAmelCase ) )
return r
def A__ ( __lowerCAmelCase : Any ):
lowerCamelCase__ = """| no. | error | status |"""
lowerCamelCase__ = """|-:|:-|:-|"""
lowerCamelCase__ = [header, sep]
for error in reduced_by_error:
lowerCamelCase__ = reduced_by_error[error]["""count"""]
lowerCamelCase__ = F'''| {count} | {error[:100]} | |'''
lines.append(__lowerCAmelCase )
return "\n".join(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = """| model | no. of errors | major error | count |"""
lowerCamelCase__ = """|-:|-:|-:|-:|"""
lowerCamelCase__ = [header, sep]
for model in reduced_by_model:
lowerCamelCase__ = reduced_by_model[model]["""count"""]
lowerCamelCase__ , lowerCamelCase__ = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCamelCase__ = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(__lowerCAmelCase )
return "\n".join(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
UpperCamelCase : List[Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase : int = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase : int = k.find(' / ')
UpperCamelCase : List[str] = k[index + len(' / ') :]
UpperCamelCase : Any = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase : str = reduce_by_error(errors)
UpperCamelCase : Any = reduce_by_model(errors)
UpperCamelCase : Optional[int] = make_github_table(reduced_by_error)
UpperCamelCase : List[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
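    # Example invocation (script name and run id are assumptions; the token needs
    # the actions:read scope):
    # python get_ci_error_statistics.py --workflow_run_id 1234567890 \
    #     --output_dir ./ci_reports --token $GITHUB_TOKEN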
| 50 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
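# These three shims bind the "mt5" model type and MTaConfig to the T5 TF models
# imported above; in upstream transformers they are TFMT5Model,
# TFMT5ForConditionalGeneration and TFMT5EncoderModel, e.g. (checkpoint assumed):
# model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")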
| 43 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : int ):
UpperCAmelCase = inspect.getfile(accelerate.test_utils )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
UpperCAmelCase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __snake_case ( self : int ):
UpperCAmelCase = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
UpperCAmelCase = [sys.executable] + distributed_args
execute_subprocess_async(a__ , env=os.environ.copy() )
| 51 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
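# Sketch of how the three helpers compose (names are assumptions, since the dump
# renames every function to _a): freeze a submodule, pick a device, stamp the run:
# freeze_params(model.backbone); device = get_device(); model.to(device)
# print(f"run started at {get_timestamp()}")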
| 43 | 0 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __A ( a_ :str) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __A ( ) -> Union[str, Any]:
with parallel_backend('''spark'''):
assert ParallelBackendConfig.backend_name == "spark"
__a : List[Any] = [1, 2, 3]
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=2)
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def __A ( a_ :Optional[Any]) -> Optional[int]:
__a : Optional[Any] = [1, 2]
__a : Dict = {'''a''': 1, '''b''': 2}
__a : Tuple = {'''a''': [1, 2], '''b''': [3, 4]}
__a : Dict = {'''a''': {'''1''': 1}, '''b''': 2}
__a : str = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__a : Dict = [2, 3]
__a : Tuple = {'''a''': 2, '''b''': 3}
__a : str = {'''a''': [2, 3], '''b''': [4, 5]}
__a : str = {'''a''': {'''1''': 2}, '''b''': 3}
__a : Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark'''):
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
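    # The pattern under test, in isolation (sketch; the mapped function must be a
    # module-level picklable callable, as the "picklable for multiprocessing" note
    # above points out):
    # with parallel_backend("spark"):
    #     assert map_nested(add_one, {"a": [1, 2]}, num_proc=2) == {"a": [2, 3]}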
| 52 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
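        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and with mask_ratio=0.6 the kept
        # sequence length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91.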
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2)
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
    model = model_class(config)
    model_input = self._prepare_for_class(inputs_dict, model_class)
    outputs = model(model_input, noise=noise)
    if model_class.__name__ == "TFViTMAEModel":
        out_2 = outputs.last_hidden_state.numpy()
        out_2[np.isnan(out_2)] = 0
    else:
        out_2 = outputs.logits.numpy()
        out_2[np.isnan(out_2)] = 0
    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname, saved_model=False)
        model = model_class.from_pretrained(tmpdirname)
        after_outputs = model(model_input, noise=noise)
        if model_class.__name__ == "TFViTMAEModel":
            out_1 = after_outputs["last_hidden_state"].numpy()
            out_1[np.isnan(out_1)] = 0
        else:
            out_1 = after_outputs["logits"].numpy()
            out_1[np.isnan(out_1)] = 0
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 43 | 0 |
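The save/reload check in the test above reduces to one reusable pattern: run the model on fixed inputs and noise, serialize, reload, and compare the two output arrays. A minimal stand-alone sketch of that pattern (the helper name and the assumption of a Hugging Face-style TF model with save_pretrained/from_pretrained are illustrative, not taken from the suite above):

# Sketch of the save/reload determinism check exercised above; names are illustrative.
import tempfile
import numpy as np

def assert_save_load_deterministic(model, model_input, noise, atol=1e-5):
    out_before = model(model_input, noise=noise)[0].numpy()
    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname, saved_model=False)
        reloaded = type(model).from_pretrained(tmpdirname)
        out_after = reloaded(model_input, noise=noise)[0].numpy()
    # zero out NaNs so they cannot dominate the comparison
    out_before[np.isnan(out_before)] = 0
    out_after[np.isnan(out_after)] = 0
    assert np.amax(np.abs(out_after - out_before)) <= atol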
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for the attributes and features related to loading models with `bitsandbytes`
    quantization (8-bit via LLM.int8() or 4-bit via FP4/NF4).
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'
        ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
| 53 |
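The to_diff_dict pattern above (serialize only the fields that differ from a default-constructed config) is generic. A small self-contained sketch of the same idea, independent of transformers; class and field names here are illustrative:

# Self-contained sketch of "diff against defaults" serialization,
# the same idea BitsAndBytesConfig.to_diff_dict implements above.
import json

class SimpleConfig:
    def __init__(self, threshold=6.0, quant_type="fp4", use_double_quant=False):
        self.threshold = threshold
        self.quant_type = quant_type
        self.use_double_quant = use_double_quant

    def to_dict(self):
        return dict(self.__dict__)

    def to_diff_dict(self):
        defaults = SimpleConfig().to_dict()
        return {k: v for k, v in self.to_dict().items() if v != defaults[k]}

print(json.dumps(SimpleConfig(quant_type="nf4").to_diff_dict()))  # {"quant_type": "nf4"}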
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase Base16 (hexadecimal) string."""
    # Turn each byte into a two-digit uppercase hex string and join them together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 (hexadecimal) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
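A quick round-trip of the two Base16 helpers above:

# Round-trip example for the Base16 helpers above.
encoded = base16_encode(b"Hello World!")
print(encoded)                 # 48656C6C6F20576F726C6421
print(base16_decode(encoded))  # b'Hello World!'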
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # the deprecated dataset_name field is still kept in the asdict output for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 54 |
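The round-trip invariant being tested above can be stated compactly: serializing a SplitDict to its YAML list form and parsing it back must preserve the split names and sizes. A minimal illustration using the same (private) helpers the test exercises, assuming the datasets library is installed:

# Round-trip of a SplitDict through its YAML list form, as in the test above.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
assert reloaded["train"].num_examples == 42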
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
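Example run of the solver above; each cell of the returned board records the move number at which the knight visited it, and sizes without an open tour (such as 2) raise ValueError:

# Usage of the knight tour solver above.
board = open_knight_tour(5)
for row in board:
    print(row)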
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` function."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base class for all schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 55 |
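The _get_compatibles classmethod above resolves scheduler class names to classes by looking them up on the package's root module. The same reflection pattern in isolation; the helper name and the collections example are illustrative:

# Stand-alone sketch of name -> class resolution via a root module,
# mirroring _get_compatibles above.
import importlib

def resolve_classes(class_names, package_name):
    root = importlib.import_module(package_name)
    return [getattr(root, name) for name in class_names if hasattr(root, name)]

# e.g. resolve_classes(["OrderedDict", "Counter"], "collections")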
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
noise_pred, _ = noise_pred.split(
    scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
    noise_pred_uncond, noise_pred = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(latents):
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43 | 0 |
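The guidance step inside the denoising loop above follows the standard classifier-free guidance formula, guided = uncond + scale * (cond - uncond). In isolation (the helper name is illustrative):

# Classifier-free guidance combination, as used in the denoising loop above.
import torch

def apply_cfg(noise_pred, guidance_scale):
    # noise_pred stacks the unconditional and conditional predictions on dim 0
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

guided = apply_cfg(torch.randn(2, 1024, 768), guidance_scale=4.0)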
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_a : List[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load flax checkpoints in a PyTorch model"""
    try:
        with open(model_file, 'rb') as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith('version'):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.')
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ')
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
if any(is_type_bf16):
    # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
    # and bf16 is not fully supported in PT yet.
    logger.warning(
        'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
        'before loading those in PyTorch model.')
    flax_state = jax.tree_util.tree_map(
        lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
__snake_case = ''
flax_state_dict = flatten_dict(flax_state, sep='.')
pt_model_dict = pt_model.state_dict()

# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys())

for flax_key_tuple, flax_tensor in flax_state_dict.items():
    flax_key_tuple_array = flax_key_tuple.split('.')
    if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
        flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
        flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
    elif flax_key_tuple_array[-1] == "kernel":
        flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
        flax_tensor = flax_tensor.T
    elif flax_key_tuple_array[-1] == "scale":
        flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']

    if "time_embedding" not in flax_key_tuple_array:
        for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
            flax_key_tuple_array[i] = (
                flax_key_tuple_string.replace('_0', '.0')
                .replace('_1', '.1')
                .replace('_2', '.2')
                .replace('_3', '.3')
                .replace('_4', '.4')
                .replace('_5', '.5')
                .replace('_6', '.6')
                .replace('_7', '.7')
                .replace('_8', '.8')
                .replace('_9', '.9')
            )

    flax_key = '.'.join(flax_key_tuple_array)

    if flax_key in pt_model_dict:
        if flax_tensor.shape != pt_model_dict[flax_key].shape:
            raise ValueError(
                f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        else:
            # add weight to pytorch dict
            flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
            pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
            # remove from missing keys
            missing_keys.remove(flax_key)
    else:
        # weight is not expected by PyTorch model
        unexpected_keys.append(flax_key)

pt_model.load_state_dict(pt_model_dict)

# re-transform missing_keys to list
missing_keys = list(missing_keys)
if len(lowercase__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(lowercase__ ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
return pt_model
| 56 |
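The key remapping above relies on the layout difference between Flax and PyTorch kernels: Flax stores conv kernels as HWIO while PyTorch expects OIHW, hence the (3, 2, 0, 1) transpose; dense kernels only need a plain transpose. In isolation (shapes are illustrative):

# Layout conversions used above: Flax conv kernels are HWIO, PyTorch expects OIHW.
import numpy as np

flax_conv_kernel = np.zeros((3, 3, 16, 32))                     # (H, W, in, out)
pt_conv_weight = np.transpose(flax_conv_kernel, (3, 2, 0, 1))   # (out, in, H, W)
assert pt_conv_weight.shape == (32, 16, 3, 3)

flax_dense_kernel = np.zeros((768, 3072))                       # (in, out)
pt_dense_weight = flax_dense_kernel.T                           # (out, in)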
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
}
| 43 | 0 |
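The metric class above is a thin wrapper around nltk's corpus-level GLEU; calling it directly shows the expected input nesting (a list of tokenized hypotheses, and a list of reference lists). The token lists here are illustrative:

# Direct use of NLTK's corpus-level GLEU, which the metric above wraps.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat"], ["he", "read", "the", "book"]]
list_of_references = [[["the", "cat", "sat", "down"]], [["he", "read", "a", "book"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))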
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'depth_multiplier' ) )
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3 , _lowerCamelCase=3_2 , _lowerCamelCase=0.2_5 , _lowerCamelCase=8 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=3_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu6" , _lowerCamelCase=1_2_8_0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=None , ):
UpperCamelCase_: List[Any] = parent
UpperCamelCase_: List[str] = batch_size
UpperCamelCase_: int = num_channels
UpperCamelCase_: Union[str, Any] = image_size
UpperCamelCase_: int = depth_multiplier
UpperCamelCase_: Optional[int] = depth_divisible_by
UpperCamelCase_: Optional[int] = min_depth
UpperCamelCase_: List[Any] = expand_ratio
UpperCamelCase_: List[Any] = tf_padding
UpperCamelCase_: str = output_stride
UpperCamelCase_: Any = first_layer_is_expansion
UpperCamelCase_: Optional[int] = finegrained_output
UpperCamelCase_: Optional[int] = hidden_act
UpperCamelCase_: Any = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCamelCase_: Optional[int] = classifier_dropout_prob
UpperCamelCase_: Optional[Any] = use_labels
UpperCamelCase_: Optional[Any] = is_training
UpperCamelCase_: Optional[Any] = num_labels
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: Union[str, Any] = scope
def _a ( self ):
UpperCamelCase_: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: Optional[Any] = None
UpperCamelCase_: str = None
if self.use_labels:
UpperCamelCase_: List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_: Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_: List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _a ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Any = MobileNetVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Optional[int] = self.num_labels
UpperCamelCase_: Dict = MobileNetVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[int] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = self.num_labels
UpperCamelCase_: Union[str, Any] = MobileNetVaForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase_: Dict = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels, pixel_labels = config_and_inputs
    inputs_dict = {'pixel_values': pixel_values}
    return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : List[str] =(
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a : Dict =(
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : List[str] =False
a : Tuple =False
a : List[Any] =False
a : List[str] =False
def _a ( self ):
UpperCamelCase_: Any = MobileNetVaModelTester(self )
UpperCamelCase_: Tuple = MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def _a ( self ):
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def _a ( self ):
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def _a ( self ):
pass
def _a ( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
    model = model_class(config)
    signature = inspect.signature(model.forward)
    # signature.parameters is an OrderedDict => so arg_names order is deterministic
    arg_names = [*signature.parameters.keys()]
    expected_arg_names = ['pixel_values']
    self.assertListEqual(arg_names[:1], expected_arg_names)
def _a ( self ):
UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
def check_hidden_states_output(inputs_dict, config, model_class):
    model = model_class(config)
    model.to(torch_device)
    model.eval()
    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))
    hidden_states = outputs.hidden_states
    expected_num_stages = 16
    self.assertEqual(len(hidden_states), expected_num_stages)

config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
    inputs_dict["output_hidden_states"] = True
    check_hidden_states_output(inputs_dict, config, model_class)
    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    config.output_hidden_states = True
    check_hidden_states_output(inputs_dict, config, model_class)
def _a ( self ):
UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def _a ( self ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: int = MobileNetVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def _a ( self ):
UpperCamelCase_: Optional[Any] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_lowerCamelCase )
UpperCamelCase_: Any = self.default_image_processor
UpperCamelCase_: Optional[int] = prepare_img()
UpperCamelCase_: int = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_: Tuple = model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_: Any = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_: Dict = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def _a ( self ):
UpperCamelCase_: Optional[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_: List[Any] = model.to(_lowerCamelCase )
UpperCamelCase_: int = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_: List[Any] = prepare_img()
UpperCamelCase_: Dict = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_: Dict = model(**_lowerCamelCase )
UpperCamelCase_: Optional[Any] = outputs.logits
# verify the logits
UpperCamelCase_: Optional[int] = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , _lowerCamelCase )
UpperCamelCase_: int = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 57 |
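Both segmentation shape assertions above reduce to the same rule: each spatial dimension of the logits is the input size divided by the model's output stride. A tiny sanity check of that arithmetic (the values are illustrative, not the tester's defaults):

# Shape rule behind the segmentation assertions above (values illustrative).
batch_size, num_labels = 13, 10
image_size, output_stride = 32, 8
expected = (batch_size, num_labels, image_size // output_stride, image_size // output_stride)
assert expected == (13, 10, 4, 4)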
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
init_dict = {
    "in_channels": 32,
    "out_channels": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
init_dict = {
    "in_channels": 32,
    "out_channels": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
init_dict = {
    "in_channels": 32,
    "temb_channels": 128,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class UpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """simple docstring"""
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """simple docstring"""
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class CrossAttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """simple docstring"""
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """simple docstring"""
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        """simple docstring"""
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """simple docstring"""
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """simple docstring"""
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """simple docstring"""
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlockaDTests(UpperCamelCase__, unittest.TestCase):
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """simple docstring"""
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """simple docstring"""
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
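# Hedged note: ``test_output`` and ``get_dummy_input`` live on the shared block-tester
# mixin defined earlier in this file. A minimal sketch of the comparison each
# ``test_output`` above is assumed to delegate to (names here are illustrative,
# not the mixin's actual implementation):
#
#     output = block(**inputs_dict)
#     output_slice = output[0, -1, -3:, -3:].flatten()
#     assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)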
| 43 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
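# Hedged usage sketch (the assertions simply restate the defaults defined above;
# not part of the original module):
#
#     config = BitConfig()
#     assert config.layer_type == "preactivation"
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]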
| 58 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
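    # Hedged sanity check (example strings added here, not part of the original
    # module): all three variants should agree on these inputs.
    assert is_pangram("Pack my box with five dozen liquor jugs")
    assert not is_pangram_faster("Hello world")
    assert is_pangram_fastest("Waltz, bad nymph, for quick jigs vex")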
| 43 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
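# Hedged usage sketch (restating the properties defined above; not part of the
# original module):
#
#     onnx_config = SegformerOnnxConfig(SegformerConfig())
#     list(onnx_config.inputs)          # ["pixel_values"]
#     onnx_config.atol_for_validation   # 1e-4
#     onnx_config.default_onnx_opset    # 12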
| 59 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
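# Hedged usage sketch (toy values, not from the original script): right-padding
# two ragged sequences to length 4 with -1 gives
#
#     padding_tensor([[1, 2], [3, 4, 5]], -1, "right", 4)
#     # -> [[1, 2, -1, -1], [3, 4, 5, -1]]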
def is_punctuation(char):
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """simple docstring"""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
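# Hedged usage sketch (the tokenizer checkpoint and feature fields are
# illustrative, following the LUKE NER example conventions):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#     collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#     batch = collator(features)  # features: list of dicts with "labels", "ner_tags", ...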
| 43 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['''past_key_values''']

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        '''simple docstring'''
        pass

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        '''simple docstring'''
        return

    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        '''simple docstring'''
        pass
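# Hedged usage note (the repository path is illustrative of the upstream test
# layout, not guaranteed for this copy of the file):
#
#     python -m pytest tests/models/trocr/test_modeling_trocr.py -k "decoder_model_past"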
| 60 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
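# Hedged usage sketch (constructor arguments are illustrative; the post-processing
# function is whatever maps raw generations back to answer strings in the caller):
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#     )
#     metrics = trainer.evaluate(max_length=128, num_beams=4)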
| 43 | 0 |