"""Bitonic sort. Note: the input length must be a power of two."""
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction`."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic slice array[low:low+length] into sorted order."""
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low+length] ascending (direction=1) or descending (direction=0)."""
    if length > 1:
        middle = length // 2
        # Sort the two halves in opposite directions to form a bitonic sequence,
        # then merge the whole slice in the requested direction.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
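# Added note (not part of the original file): bitonic sort assumes the input
# length is a power of two, otherwise the merge network cannot pair every
# element. A minimal sanity check under that assumption:
#
#     data = [3, 7, 4, 8, 6, 2, 1, 5]
#     bitonic_sort(data, 0, len(data), 1)
#     assert data == [1, 2, 3, 4, 5, 6, 7, 8]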
| 41 | """simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Any) -> Optional[int]:
__snake_case : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Any , _A : Any) -> str:
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = hidden_states.shape
__snake_case : Union[str, Any] = jax.image.resize(
_A , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__snake_case : List[Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Optional[Any]) -> List[Any]:
__snake_case : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : int , _A : str) -> Any:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__snake_case : Union[str, Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : int = None
UpperCAmelCase : float = 0.0
UpperCAmelCase : bool = None
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : List[str]) -> Dict:
__snake_case : str = self.in_channels if self.out_channels is None else self.out_channels
__snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : Optional[int] = nn.Dense(_A , dtype=self.dtype)
__snake_case : int = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Dropout(self.dropout_prob)
__snake_case : Dict = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__snake_case : Optional[Any] = None
if use_nin_shortcut:
__snake_case : List[str] = nn.Conv(
_A , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__(self : List[Any] , _A : Union[str, Any] , _A : str , _A : int=True) -> Any:
__snake_case : List[Any] = hidden_states
__snake_case : Optional[Any] = self.norma(_A)
__snake_case : int = nn.swish(_A)
__snake_case : Optional[int] = self.conva(_A)
__snake_case : Dict = self.time_emb_proj(nn.swish(_A))
__snake_case : List[str] = jnp.expand_dims(jnp.expand_dims(_A , 1) , 1)
__snake_case : Any = hidden_states + temb
__snake_case : Tuple = self.norma(_A)
__snake_case : Dict = nn.swish(_A)
__snake_case : Union[str, Any] = self.dropout(_A , _A)
__snake_case : Union[str, Any] = self.conva(_A)
if self.conv_shortcut is not None:
__snake_case : List[Any] = self.conv_shortcut(_A)
return hidden_states + residual
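
# Added example (assumption: illustrative only, not from the original file).
# A minimal sketch of initialising and applying the ResNet block on NHWC
# inputs; with deterministic=True (the default) dropout needs no extra rng.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.zeros((1, 8, 8, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(rng, x, temb)
    out = block.apply(params, x, temb)
    assert out.shape == (1, 8, 8, 64)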
"""Boolean-algebra NAND gate: the output is 0 only when both inputs are 1."""


def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
"""Sort the entries of `_import_structure` in the library's `__init__.py` files."""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a sort key so that it is case-insensitive and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of `file`; `check_only` decides between checking and overwriting."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
"""simple docstring"""
from math import pi, sqrt, tan
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
lowercase_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(__lowerCAmelCase , 2 ) * torus_radius * tube_radius
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
lowercase_ = (sidea + sidea + sidea) / 2
lowercase_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
"""
Project Euler problem 46 (Goldbach's other conjecture): find the smallest odd
composite that cannot be written as the sum of a prime and twice a square.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
"""Tokenization classes for BigBird."""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of sentencepiece tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
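
# Added example (assumption: illustrative only, not part of the original file).
# The tokenizer is typically loaded from the Hub rather than constructed directly:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
#     input_ids = tokenizer("Hello world").input_ids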
"""Convert a decimal (base 10) integer to hexadecimal (base 16)."""

values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """
    Take a whole-number decimal value and return its hexadecimal equivalent.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(26)
    '0x1a'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
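
# Added example (assumption: illustrative only, not part of the original module).
# Typical usage is as a context manager; the lock is re-entrant per instance:
#
#     lock = FileLock("my_resource.txt.lock", timeout=10)
#     with lock:
#         ...  # exclusive access; raises Timeout if not acquired within 10s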
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""MRA model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
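
# Added example (assumption: illustrative only, not part of the original file).
# The config is consumed by the corresponding model class:
#
#     from transformers import MraConfig, MraModel
#     config = MraConfig()        # defaults follow the uw-madison/mra-base-512-4 style
#     model = MraModel(config)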
"""
Given two of stress, tangential force, and area, compute the missing quantity
via the shear-stress relation: stress = tangential_force / area.
"""
from __future__ import annotations


def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    # Exactly one of the three inputs must be 0; it marks the unknown quantity.
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return ("stress", tangential_force / area)
    elif tangential_force == 0:
        return ("tangential_force", stress * area)
    else:
        return ("area", tangential_force / stress)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
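
# Added example (assumption: illustrative only, not part of the original file).
# Passing stress=0 asks for the stress given force and area:
#
#     shear_stress(stress=0, tangential_force=100, area=20) == ("stress", 5.0)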
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowerCAmelCase = datasets.logging.get_logger(__name__)
_lowerCAmelCase = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_lowerCAmelCase = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_lowerCAmelCase = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
_lowerCAmelCase = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
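

# Added illustration (not part of the transformers source): a stripped-down
# version of the lazy-import idea used above. Attribute access on the module
# triggers the real import; `import_structure` maps submodule -> public names.
# The names here are hypothetical and independent of `transformers._LazyModule`.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert the structure: public attribute -> defining submodule.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule_name = self._attr_to_submodule.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        return getattr(submodule, attr)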
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
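

# Added illustration: the converter can also be called directly from Python
# rather than via the CLI. All paths below are placeholders -- point them at a
# real TensorFlow XLNet checkpoint before running.
#
#   convert_xlnet_checkpoint_to_pytorch(
#       tf_checkpoint_path="/tmp/xlnet/xlnet_model.ckpt",
#       bert_config_file="/tmp/xlnet/xlnet_config.json",
#       pytorch_dump_folder_path="/tmp/xlnet-pytorch",
#       finetuning_task="sts-b",  # any key of GLUE_TASKS_NUM_LABELS, or None
#   )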
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        # Number of nodes, edge list of [u, v, weight], and node -> component map.
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # Walk up the component map until a node points to itself.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        # Path-compress every node onto its root component.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Merge the smaller component into the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        # Boruvka's algorithm: repeatedly add the minimum-weight edge leaving
        # each component until a single component (the MST) remains.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
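
    # Added worked example for the Graph class above: the MST of this 4-node
    # graph picks edges 0-2 (1), 1-2 (3) and 2-3 (4), for a total weight of 8.
    g = Graph(4)
    g.add_edge(0, 1, 5)
    g.add_edge(0, 2, 1)
    g.add_edge(1, 2, 3)
    g.add_edge(2, 3, 4)
    g.boruvka()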
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        # (a standalone numeric illustration of this check follows after the class).
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
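

# Added standalone illustration of the ratio-based tolerance check used in
# `test_inference_no_head` above: at magnitudes near 1e8, absolute differences
# are meaningless while the element-wise ratio stays close to 1.
if __name__ == "__main__" and is_torch_available():
    a = torch.tensor([1e8, 2.5, -3e4])
    b = a * (1 + 1e-6)  # one part-per-million relative error everywhere

    print(bool(torch.all((a - b).abs() < 1e-3)))  # False: the 1e8 entry differs by ~100
    ratio = a / b
    print(bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)))  # True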
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`,
        # while the PyTorch version returns them, so skip that comparison here.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
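

# Added illustration: asserting that constructing the deprecated wrapper emits
# a FutureWarning (construction with default arguments is assumed to succeed).
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        GLPNFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)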
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of ``abs(n)``."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of ``sum_of_digits``."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner variant using string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark all three digit-sum implementations on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
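
    # Added sanity check (not in the original): all three implementations must
    # agree across a spread of values, including zero and negatives.
    for n in (0, 7, -7, 12345, 2**31 - 1):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)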
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
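

# Added illustration (hypothetical helper, not in the original script): a
# defensive variant of the raw weight copies above that verifies tensor shapes
# before assignment, so a mismatched s3prl checkpoint fails loudly.
def _copy_checked(param, tensor, name):
    if param.shape != tensor.shape:
        raise ValueError(f"{name}: expected shape {tuple(param.shape)}, got {tuple(tensor.shape)}")
    param.data = tensor


# e.g. _copy_checked(model.projector.weight, downstream_dict["projector.weight"], "projector.weight")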
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
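

# Added illustration (hypothetical values): every test above ultimately shells
# out to one `accelerate launch` command -- the class-level prefix composed in
# `setUpClass` plus the per-test script arguments.
if __name__ == "__main__":
    launch_args = ["accelerate", "launch", "--config_file", "/tmp/default_config.yml"]
    testargs = "examples/pytorch/text-classification/run_glue_no_trainer.py --seed=42".split()
    print(" ".join(launch_args + testargs))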
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item`: one point for every position that matches `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed and mutate children of `parent_1`, proportionally to its fitness."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution loop until `target` is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
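
    # Added smoke test of the primitives above (seeded for reproducibility;
    # crossover/mutate output depends on the RNG, so only invariants are asserted).
    random.seed(0)
    demo_parent_1, demo_parent_2 = "abcabc", "cbacba"
    assert evaluate("abcabc", "abcabc") == ("abcabc", 6.0)  # perfect match scores len(target)
    demo_child_1, demo_child_2 = crossover(demo_parent_1, demo_parent_2)
    assert len(demo_child_1) == len(demo_child_2) == len(demo_parent_1)
    assert len(mutate(demo_child_1, genes_list)) == len(demo_child_1)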
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = self.perceiver_tokenizer
__lowerCamelCase : Optional[int] = "Unicode €."
__lowerCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase )
__lowerCamelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : List[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]Unicode €.[SEP]" )
__lowerCamelCase : Optional[Any] = tokenizer("e è é ê ë" )
__lowerCamelCase : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def lowerCamelCase__ ( self : Any ):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
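        # (2, 38) because the longer sentence, "Another paragraph for summarization.",
        # is 36 utf-8 bytes plus [CLS] and [SEP]; the shorter first sentence is
        # right-padded with ID 0 (the trailing element of the expected list above).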
def lowerCamelCase__ ( self : Tuple ):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertNotIn("decoder_input_ids" , batch )
        self.assertNotIn("decoder_attention_mask" , batch )
def lowerCamelCase__ ( self : Dict ):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding="max_length" , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCamelCase__ ( self : str ):
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
def lowerCamelCase__ ( self : Optional[Any] ):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [F"""<extra_id_{i}>""" for i in range(125 )]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
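        # ID 178 is raw byte 172 (178 - 6 special tokens), a utf-8 continuation byte
        # that is invalid on its own, so it decodes to the replacement character.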
def lowerCamelCase__ ( self : List[str] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str ) | 135 | 1 |
'''simple docstring'''
def __lowerCamelCase ( discount_rate : float , cash_flows : list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
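# Editor's usage sketch (hypothetical figures, not part of the original module):
# at a 10% discount rate, an outlay of 100 followed by inflows of 50 and 60 nets
# -100 + 50/1.1 + 60/1.1**2 = -4.96 after rounding to two digits:
#
#     assert __lowerCamelCase(0.1, [-100.0, 50.0, 60.0]) == -4.96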
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main ( ) -> None:
    data = load_iris()
    features , targets = data_handling(data )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = data["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier, evaluated on the held-out test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
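# Editor's sketch of the expected flow (assumes scikit-learn's iris bunch layout):
#
#     features, targets = data_handling(load_iris())   # features: (150, 4)
#     model = xgboost(features, targets)
#     model.predict(features[:1])                      # e.g. array([0])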
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
def __snake_case (self ) -> int:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester :
    def __init__(self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=1024 , output_stride=32 , hidden_act="""relu6""" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def __snake_case (self ) -> Dict:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
def __snake_case (self ) -> List[str]:
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def __snake_case (self, config, pixel_values, labels, pixel_labels ) -> Tuple:
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
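    # Editor's note: with the defaults above, last_hidden_size = int(1024 * 0.25) = 256
    # and each spatial side is image_size // output_stride, so the check above pins the
    # model output to a (batch_size, 256, side, side) feature map.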
    def __snake_case (self, config, pixel_values, labels, pixel_labels ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __snake_case (self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
A = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
def __snake_case (self ) -> List[str]:
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False )
def __snake_case (self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def __snake_case (self ) -> List[Any]:
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def __snake_case (self ) -> int:
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def __snake_case (self ) -> Tuple:
pass
def __snake_case (self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )
def __snake_case (self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __snake_case (self ) -> Union[str, Any]:
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ), expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
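    # The expected count of 26 reflects MobileNetV1's backbone reporting one hidden
    # state per convolution (depthwise and pointwise outputs of its separable blocks).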
def __snake_case (self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __snake_case (self ) -> List[Any]:
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> List[Any]:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def __snake_case (self ) -> List[Any]:
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
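        # 1001 logits = 1000 ImageNet classes plus the extra "background" class that
        # the original TensorFlow MobileNet checkpoints carry.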
| 147 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask (size , overlap_pixels , remove_borders=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 255
    mask = np.pad(mask , mode="""linear_ramp""" , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
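# Editor's sketch: for size=(64, 64) and overlap_pixels=8 with no borders removed,
# the result is a 64x64 uint8 mask that is 255 over the 48x48 interior and ramps
# linearly to 0 across the 8-pixel border -- this is what lets neighbouring tiles
# cross-fade when pasted into the final image.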
def clamp (n , smallest , largest ):
    """simple docstring"""
    return max(smallest , min(n , largest ) )
def clamp_rect (rect: [int] , min: [int] , max: [int] ):
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect (rect: [int] , overlap: int , image_size: [int] ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
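# Editor's sketch: add_overlap_rect((0, 0, 128, 128), 8, (512, 512)) -> (0, 0, 136, 136);
# the rectangle grows by `overlap` on every side, then is clamped so edges that touch
# the image border do not grow outward.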
def squeeze_tile (tile , original_image , original_slice , slice_x ):
    """simple docstring"""
    result = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile (tile , original_image_slice ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def next_divisible (n , d ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
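# e.g. next_divisible(130, 32) == 128: n is rounded down to the nearest multiple of d.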
class StableDiffusionTiledUpscalePipeline ( StableDiffusionUpscalePipeline ):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level = 350, ) -> str:
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile (self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs ) -> Dict:
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size ),
            min(image.size[0], (x + 1) * tile_size ),
            min(image.size[1], (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x )
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self ).__call__(image=to_input, **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append("""l""" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""" )
        if y == 0:
            remove_borders.append("""t""" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""" )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders ), mode="""L""", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask )
@torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps = 75, guidance_scale = 9.0, noise_level = 50, negative_prompt = None, num_images_per_prompt = 1, eta = 0.0, generator = None, latents = None, callback = None, callback_steps = 1, tile_size = 128, tile_border = 32, original_image_slice = 32, ) -> Dict:
        final_image = Image.new("""RGB""", (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
        return final_image
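# Editor's note: tiles are visited in row-major order; each tile is upscaled 4x by the
# parent StableDiffusionUpscalePipeline and pasted back under the linear-ramp alpha
# mask, so the `tile_border` regions blend smoothly between neighbouring tiles.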
def main ():
    """simple docstring"""
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision="""fp16""" , torch_dtype=torch.float16 )
    pipe = pipe.to("""cuda""" )
    image = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
    def callback(obj: Dict ):
        print(F'progress: {obj["progress"]:.4f}' )
        obj["image"].save("""diffusers_library_progress.jpg""" )
    final_image = pipe(image=image , prompt="""Black font, white background, vector""" , noise_level=40 , callback=callback )
    final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 147 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase_ = get_tests_dir('''fixtures''')
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ (self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __A( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ (cls ):
UpperCamelCase__ = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def UpperCAmelCase_ (cls ):
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def UpperCAmelCase_ (self ):
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id="""test-feature-extractor""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCAmelCase_ (self ):
CustomFeatureExtractor.register_for_auto_class()
UpperCamelCase__ = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=UpperCamelCase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 357 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one ( i : Any ): # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input ( ):
    '''simple docstring'''
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def test_parallel_backend_map_nested ( num_proc : Optional[int] ):
    '''simple docstring'''
    s1 = [1, 2]
    s2 = {"""a""": 1, """b""": 2}
    s3 = {"""a""": [1, 2], """b""": [3, 4]}
    s4 = {"""a""": {"""1""": 1}, """b""": 2}
    s5 = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"""a""": 2, """b""": 3}
    expected_map_nested_s3 = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_s4 = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_s5 = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
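# Editor's sketch of the semantics under test: map_nested applies the function to every
# leaf of a (possibly nested) structure while preserving its shape, e.g.
#
#     map_nested(add_one, {"a": [1, 2], "b": [3, 4]})  # -> {"a": [2, 3], "b": [4, 5]}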
| 178 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __UpperCamelCase ( unittest.TestCase , ToolTesterMixin ):
def lowercase__ ( self ):
"""simple docstring"""
        self.tool = load_tool('''text-classification''' )
        self.tool.setup()
        self.remote_tool = load_tool('''text-classification''', remote=True )
def lowercase__ ( self ):
"""simple docstring"""
        result = self.tool('''That\'s quite cool''', ['''positive''', '''negative'''] )
        self.assertEqual(result, '''positive''' )
def lowercase__ ( self ):
"""simple docstring"""
        result = self.remote_tool('''That\'s quite cool''', ['''positive''', '''negative'''] )
        self.assertEqual(result, '''positive''' )
def lowercase__ ( self ):
"""simple docstring"""
        result = self.tool(text='''That\'s quite cool''', labels=['''positive''', '''negative'''] )
        self.assertEqual(result, '''positive''' )
def lowercase__ ( self ):
"""simple docstring"""
        result = self.remote_tool(text='''That\'s quite cool''', labels=['''positive''', '''negative'''] )
        self.assertEqual(result, '''positive''' )
| 75 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args (unknown_args ):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 34 | 0 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def generate_summaries_or_translations ( examples: List[str] ,out_file: str ,model_name: str ,batch_size: int = 8 ,device: str = DEFAULT_DEVICE ,fp16=False ,task="summarization" ,prefix=None ,**generate_kwargs ,) ->Dict:
    '''simple docstring'''
    fout = Path(out_file ).open('''w''' ,encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model ,task )
    if prefix is None:
        prefix = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples ,batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk ,return_tensors='''pt''' ,truncation=True ,padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**generate_kwargs ,)
        dec = tokenizer.batch_decode(summaries ,skip_special_tokens=True ,clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def datetime_now ( ) ->str:
    '''simple docstring'''
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate ( verbose=True ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' ,type=str ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' ,type=str ,help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' ,type=str ,help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' ,type=str ,required=False ,help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' ,type=str ,required=False ,default='''metrics.json''' ,help='''where to save metrics''' )
    parser.add_argument('''--device''' ,type=str ,required=False ,default=DEFAULT_DEVICE ,help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' ,type=str ,required=False ,default=None ,help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' ,type=str ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' ,type=int ,default=8 ,required=False ,help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' ,type=int ,default=-1 ,required=False ,help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' ,action='''store_true''' )
    parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' ,nargs='''?''' ,type=str ,const=datetime_now() ,help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) ,)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fp16=args.fp16 ,task=args.task ,prefix=args.prefix ,**parsed_args ,)
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores: dict = score_fn(output_lns ,reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores ,open(args.score_path ,'''w''' ) )
    return scores
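# Editor's usage sketch (paths and model are placeholders, not from the original file):
#
#     python run_eval.py t5-small cnn_dm/test.source preds.txt \
#         --reference_path cnn_dm/test.target --score_path metrics.json --task summarization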
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 291 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime ( number : int ) ->bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ) ->Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( n : int = 2000000 ) ->int:
    '''simple docstring'''
    return sum(takewhile(lambda x : x < n ,prime_generator() ) )
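# The 6k +/- 1 trial division above makes is_prime O(sqrt(n)); as a quick check,
# solution(10) == 17, the sum of the primes 2 + 3 + 5 + 7 below ten.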
if __name__ == "__main__":
print(f'''{solution() = }''')
| 291 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def convert_classification ( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization ( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector ( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint ( base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
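# Editor's usage sketch (the script name and all paths are placeholders):
#
#     python convert_script.py --base_model_name microsoft/unispeech-sat-base \
#         --config_path config.json --checkpoint_path s3prl_checkpoint.pt \
#         --model_dump_path ./converted_model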
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
UpperCAmelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file ( ) -> Optional[int]:
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results ( output_dir ) -> Tuple:
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
def is_cuda_and_apex_available ( ) -> int:
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
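# The tests below only append --fp16 when both a CUDA device and NVIDIA Apex are
# available, since running half precision on CPU raises at runtime.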
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ ( lowercase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls : str ) ->Tuple:
"""simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = 7 if get_gpu_count() > 1 else 2
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
a = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''image_classification_no_trainer''' ) ) )
| 0 | 1 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1), x = r2 (mod n2) for coprime moduli n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n (gcd(a, n) must be 1)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
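# Added example (not in the original): both implementations should agree; for
# x = 1 (mod 5) and x = 3 (mod 7) the unique solution modulo 35 is 31.
#
#     >>> chinese_remainder_theorem(5, 1, 7, 3)
#     31
#     >>> chinese_remainder_theorem2(5, 1, 7, 3)
#     31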
| 252 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
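# Added usage sketch (not in the original): running a small HTML document
# through the feature extractor; the expected outputs are illustrative.
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   # [['Hello world']]
#     encoding["xpaths"]  # [['/html/body/p']]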
| 252 | 1 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the present value of a series of future cash flows, rounded to 2 decimals."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
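# Added example (not in the original): three yearly cash flows of 100 at a 10%
# discount rate; the flow at i == 0 is not discounted.
#
#     >>> present_value(0.10, [100.0, 100.0, 100.0])
#     273.55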
| 3 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 3 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
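# Added note (not in the original): `_LazyModule` defers the heavy torch import
# until an attribute is first touched. A hypothetical access pattern (the exact
# module path is an assumption):
#
#     from transformers.models.deprecated import trajectory_transformer as tt
#     tt.TrajectoryTransformerConfig   # triggers the real submodule import here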
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
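# Added usage note (not in the original): a hypothetical invocation of this
# conversion script; the script file name and output path are illustrative.
#
#     python convert_musicgen_checkpoint.py \
#         --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small \
#         --device cpu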
| 348 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 54 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , __UpperCamelCase=True , **__UpperCamelCase , ) -> int:
'''simple docstring'''
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCamelCase ) != add_prefix_space:
snake_case__ : Any = getattr(__UpperCamelCase , pre_tok_state.pop('type' ) )
snake_case__ : List[str] = add_prefix_space
snake_case__ : Any = pre_tok_class(**__UpperCamelCase )
snake_case__ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case__ : Dict = 'post_processor'
snake_case__ : Union[str, Any] = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
snake_case__ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ : List[Any] = tuple(state['sep'] )
if "cls" in state:
snake_case__ : List[str] = tuple(state['cls'] )
snake_case__ : int = False
if state.get('add_prefix_space' , __UpperCamelCase ) != add_prefix_space:
snake_case__ : Tuple = add_prefix_space
snake_case__ : Any = True
if state.get('trim_offsets' , __UpperCamelCase ) != trim_offsets:
snake_case__ : Dict = trim_offsets
snake_case__ : List[Any] = True
if changes_to_apply:
snake_case__ : Union[str, Any] = getattr(__UpperCamelCase , state.pop('type' ) )
snake_case__ : int = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
snake_case__ : str = kwargs.get('is_split_into_words' , __UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
snake_case__ : Optional[int] = kwargs.get('is_split_into_words' , __UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = [self.sep_token_id]
snake_case__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
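# Added usage sketch (not in the original): loading the fast tokenizer from one
# of the checkpoints listed above; the method defined earlier wraps the input
# ids in bos/eos tokens.
#
#     from transformers import BartTokenizerFast
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     ids = tok("Hello world").input_ids
#     # ids starts with tok.bos_token_id and ends with tok.eos_token_id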
| 143 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a__ :
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.get_dummy_components()
__a = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = inputs['prompt']
__a = inputs['generator']
__a = inputs['num_inference_steps']
__a = inputs['output_type']
if "image" in inputs:
__a = inputs['image']
else:
__a = None
if "mask_image" in inputs:
__a = inputs['mask_image']
else:
__a = None
if "original_image" in inputs:
__a = inputs['original_image']
else:
__a = None
__a , __a = pipe.encode_prompt(UpperCAmelCase )
# inputs with prompt converted to embeddings
__a = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
__a = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = inputs['generator']
__a = inputs['num_inference_steps']
__a = inputs['output_type']
# inputs with prompt converted to embeddings
__a = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
__a = pipe_loaded(**UpperCAmelCase )[0]
__a = np.abs(to_np(UpperCAmelCase ) - to_np(UpperCAmelCase ) ).max()
self.assertLess(UpperCAmelCase , 1e-4 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = self.get_dummy_components()
__a = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
__a = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = pipe_loaded(**UpperCAmelCase )[0]
__a = np.abs(to_np(UpperCAmelCase ) - to_np(UpperCAmelCase ) ).max()
self.assertLess(UpperCAmelCase , 1e-4 )
| 197 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 197 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
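# Added usage sketch (not in the original): the derived attributes computed in
# __init__ above, for the default configuration.
#
#     config = DinatConfig()
#     config.num_layers    # 4, one entry per stage in `depths`
#     config.hidden_size   # 512 == 64 * 2 ** (4 - 1)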
| 167 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = ['model.decoder.embed_positions.weights']
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
if "emb" in name:
A_ : Tuple = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
A_ : Optional[int] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
A_ : Optional[Any] = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
A_ : int = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
A_ : Optional[int] = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
A_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
A_ : Any = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
A_ : Dict = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
A_ : Tuple = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
A_ : Union[str, Any] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
A_ : Tuple = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = list(state_dict.keys() )
A_ : List[Any] = {}
for key in keys:
A_ : List[str] = state_dict.pop(_UpperCAmelCase )
A_ : Tuple = rename_keys(_UpperCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
A_ : Any = val[:hidden_size, :]
A_ : Optional[int] = val[hidden_size : 2 * hidden_size, :]
A_ : Union[str, Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A_ : List[str] = val
else:
A_ : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
if checkpoint == "small":
# default config values
A_ : Optional[Any] = 1024
A_ : Tuple = 24
A_ : int = 16
elif checkpoint == "medium":
A_ : Any = 1536
A_ : Union[str, Any] = 48
A_ : List[Any] = 24
elif checkpoint == "large":
A_ : Optional[int] = 2048
A_ : Optional[int] = 48
A_ : Tuple = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
A_ : Tuple = MusicgenDecoderConfig(
hidden_size=_UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , )
return config
@torch.no_grad()
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ):
"""simple docstring"""
A_ : Any = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
A_ : str = decoder_config_from_checkpoint(_UpperCAmelCase )
A_ : Optional[int] = fairseq_model.lm.state_dict()
A_ , A_ : str = rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
A_ : List[str] = TaEncoderModel.from_pretrained('''t5-base''' )
A_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
A_ : Union[str, Any] = MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A_ , A_ : Tuple = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A_ : Tuple = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
A_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A_ : Tuple = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
A_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
A_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
A_ : Optional[int] = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
A_ : Tuple = 2048
A_ : Union[str, Any] = 2048
# set other default generation config params
A_ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
A_ : List[str] = True
A_ : List[str] = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 167 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
__lowercase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> int:
'''simple docstring'''
for attribute in key.split("." ):
a : int = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
a : List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
a : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
a : Dict = value
elif weight_type == "weight_g":
a : Optional[int] = value
elif weight_type == "weight_v":
a : Union[str, Any] = value
elif weight_type == "bias":
a : Any = value
else:
a : str = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowercase ( A_ , A_ )-> Optional[int]:
'''simple docstring'''
a : List[str] = []
a : Union[str, Any] = fairseq_model.state_dict()
a : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a : Any = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
a : Union[str, Any] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
a : Any = True
if "*" in mapped_key:
a : Union[str, Any] = name.split(lowerCAmelCase__ )[0].split("." )[-2]
a : Union[str, Any] = mapped_key.replace("*" , lowerCAmelCase__ )
if "weight_g" in name:
a : int = "weight_g"
elif "weight_v" in name:
a : Dict = "weight_v"
elif "bias" in name:
a : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a : List[Any] = "weight"
else:
a : str = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
a : Any = full_name.split("conv_layers." )[-1]
a : Tuple = name.split("." )
a : Optional[int] = int(items[0] )
a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
a : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
a : List[str] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
a : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
a : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowercase ( A_ , A_ , A_=None , A_=None , A_=True )-> Any:
'''simple docstring'''
if config_path is not None:
a : List[str] = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
else:
a : Optional[Any] = UniSpeechSatConfig()
a : int = ""
if is_finetuned:
a : int = UniSpeechSatForCTC(lowerCAmelCase__ )
else:
a : Optional[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )
a , a , a : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
a : Any = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowercase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 366 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity: 1.0 for identical strings, lower as they diverge."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
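# Added example (not in the original): identical strings score exactly 1.0,
# and any other pair scores strictly between 0.0 and 1.0.
#
#     >>> jaro_winkler("hello", "hello")
#     1.0
#     >>> 0.0 <= jaro_winkler("hello", "world") < 1.0
#     True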
| 226 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 310 |
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
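# Added usage sketch (not in the original): building a 1-layer student outside
# the test harness. The exact return tuple is an assumption; only the first
# element (the student model) is exercised by the tests above.
#
#     student, *layer_ids = create_student_by_copying_alternating_layers(
#         TINY_BART, "student-tiny", e=1, d=1
#     )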
| 30 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=2 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Tuple = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : str = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Optional[Any] = relative_attention
UpperCAmelCase_ : Optional[Any] = position_biased_input
UpperCAmelCase_ : str = pos_att_type
UpperCAmelCase_ : Optional[Any] = scope
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[int] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[int] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : str = TFDebertaVaModel(config=snake_case_ )
UpperCAmelCase_ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase_ : Optional[int] = [input_ids, input_mask]
UpperCAmelCase_ : Dict = model(snake_case_ )
UpperCAmelCase_ : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = TFDebertaVaForMaskedLM(config=snake_case_ )
UpperCAmelCase_ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase_ : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.num_labels
UpperCAmelCase_ : str = TFDebertaVaForSequenceClassification(config=snake_case_ )
UpperCAmelCase_ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase_ : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : Optional[Any] = TFDebertaVaForTokenClassification(config=snake_case_ )
UpperCAmelCase_ : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase_ : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = TFDebertaVaForQuestionAnswering(config=snake_case_ )
UpperCAmelCase_ : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCAmelCase_ : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ :Any = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase_ :Union[str, Any] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase_ :List[str] = False
lowerCamelCase_ :Tuple = False
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFDebertaVaModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : str = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(snake_case_ )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : str = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
UpperCAmelCase_ : Tuple = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase_ : Optional[int] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase_ : Any = model(snake_case_ , attention_mask=snake_case_ )[0]
UpperCAmelCase_ : List[str] = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 )
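
# Note on the integration check above: comparing a small fixed slice of the
# output tensor against hard-coded reference values with a loose tolerance
# (atol=1e-4) is how these ports are typically validated against the original
# implementation without shipping full reference tensors.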
| 274 | '''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : List[Any] = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : List[Any] = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : Any = num_choices
UpperCAmelCase_ : List[str] = scope
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , use_stable_embedding=snake_case_ , )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : str = OpenLlamaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : str = model(snake_case_ , attention_mask=snake_case_ )
UpperCAmelCase_ : Dict = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Dict = OpenLlamaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
UpperCAmelCase_ : Any = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
UpperCAmelCase_ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
UpperCAmelCase_ : Tuple = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
UpperCAmelCase_ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )['hidden_states'][0]
UpperCAmelCase_ : List[str] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['hidden_states'][0]
# select random slice
UpperCAmelCase_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def _UpperCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ :Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowerCamelCase_ :Tuple = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ :Union[str, Any] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ :str = False
lowerCamelCase_ :Optional[int] = False
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = OpenLlamaModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : int = type
self.model_tester.create_and_check_model(*snake_case_ )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : Union[str, Any] = input_dict['input_ids']
UpperCAmelCase_ : int = input_ids.ne(1 ).to(snake_case_ )
UpperCAmelCase_ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Any = OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : str = 'single_label_classification'
UpperCAmelCase_ : List[str] = input_dict['input_ids']
UpperCAmelCase_ : Optional[Any] = input_ids.ne(1 ).to(snake_case_ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : int = 'multi_label_classification'
UpperCAmelCase_ : Dict = input_dict['input_ids']
UpperCAmelCase_ : int = input_ids.ne(1 ).to(snake_case_ )
UpperCAmelCase_ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the
        # original maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
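
# Illustrative sketch of the "linear" RoPE scaling case exercised by
# test_model_rope_scaling above. The function and argument names here are
# mine, not the transformers API: positions are divided by the scaling factor
# before the rotary angles are computed, which is why even short inputs
# produce different outputs than the unscaled model under linear scaling.
def _rotary_angles_sketch(positions, dim, base=10000.0, scaling_factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = positions.float() / scaling_factor  # the linear-scaling step
    return torch.outer(positions, inv_freq)  # one angle per (position, frequency)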
| 274 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase ( __lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = relative_attention
_snake_case = position_biased_input
_snake_case = pos_att_type
_snake_case = scope
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCamelCase ( self ):
"""simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
return config
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = DebertaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_snake_case = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = DebertaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = DebertaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = DebertaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = DebertaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowercase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase = True
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = DebertaModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = DebertaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = DebertaModel.from_pretrained('microsoft/deberta-base' )
_snake_case = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_snake_case = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
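
# Background note on the config fields exercised above: DeBERTa's disentangled
# attention scores each query/key pair from content and relative position
# separately, roughly
#     A[i, j] = Qc[i]·Kc[j] + Qc[i]·Kr[δ(i,j)] + Kc[j]·Qr[δ(j,i)]
# (content-to-content, content-to-position, position-to-content). The
# pos_att_type setting selects which positional terms are included, while
# relative_attention and position_biased_input toggle the relative embeddings
# and the absolute position bias.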
| 42 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
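
# Minimal usage sketch for the composite config above (illustrative; the "opt"
# text config mirrors the class's own fallback):
#
#     vision_config = Blip2VisionConfig()
#     qformer_config = Blip2QFormerConfig()
#     text_config = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config, qformer_config, text_config
#     )
#     assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size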
| 195 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
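
# Example of the traversal above (illustrative): with key
# "encoder.pos_conv_embed.conv" and weight_type "bias", the loop walks
# hf_pointer.encoder.pos_conv_embed.conv, the assert checks shape
# compatibility, and the "bias" branch writes `value` into its .bias.data.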
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
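
# Example invocation (all paths are placeholders; the script file name is
# whatever this module is saved as):
#
#     python <this_script>.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.mbart50.txt \
#         --config_yaml_path /path/to/config.yaml \
#         --pytorch_dump_folder_path ./wav2vec2-mbart50-out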
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
) | 267 |
from __future__ import annotations
import time
UpperCAmelCase = list[tuple[int, int]]
UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = pos_x
snake_case_ = pos_y
snake_case_ = (pos_y, pos_x)
snake_case_ = goal_x
snake_case_ = goal_y
snake_case_ = parent
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = Node(start[1] , start[0] , goal[1] , goal[0] , _UpperCAmelCase )
snake_case_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , _UpperCAmelCase )
snake_case_ = [self.start]
snake_case_ = False
def UpperCamelCase__ ( self ):
while self.node_queue:
snake_case_ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case_ = True
return self.retrace_path(_UpperCAmelCase )
snake_case_ = self.get_successors(_UpperCAmelCase )
for node in successors:
self.node_queue.append(_UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = []
for action in delta:
snake_case_ = parent.pos_x + action[1]
snake_case_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_UpperCAmelCase , _UpperCAmelCase , self.target.pos_y , self.target.pos_x , _UpperCAmelCase ) )
return successors
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = node
snake_case_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case_ = current_node.parent
path.reverse()
return path
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = BreadthFirstSearch(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = BreadthFirstSearch(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = False
def UpperCamelCase__ ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case_ = self.fwd_bfs.node_queue.pop(0 )
snake_case_ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case_ = True
return self.retrace_bidirectional_path(
_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = current_bwd_node
snake_case_ = current_fwd_node
snake_case_ = {
self.fwd_bfs: self.fwd_bfs.get_successors(_UpperCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(_UpperCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_UpperCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.fwd_bfs.retrace_path(_UpperCAmelCase )
snake_case_ = self.bwd_bfs.retrace_path(_UpperCAmelCase )
bwd_path.pop()
bwd_path.reverse()
snake_case_ = fwd_path + bwd_path
return path
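
# Why the bidirectional variant can pay off: two frontiers of depth d/2 visit
# on the order of 2 * b**(d/2) nodes versus b**d for a single frontier with
# branching factor b. Note that the meeting test above only compares the one
# node popped from each queue per iteration, so this is a simplified sketch of
# the classic frontier-intersection check rather than a complete one.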
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase = (0, 0)
UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase = time.time()
UpperCAmelCase = BreadthFirstSearch(init, goal)
UpperCAmelCase = bfs.search()
UpperCAmelCase = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
UpperCAmelCase = time.time()
UpperCAmelCase = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase = bd_bfs.search()
UpperCAmelCase = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time) | 267 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x). The function
    name is chosen descriptively; 1.702 is the standard constant for this
    approximation."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
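    # Quick numeric checks, computed by hand from the formulas above:
    #   sigmoid(np.array([0.0, 1.0]))               -> array([0.5, 0.73105858])
    #   gaussian_error_linear_unit(np.array([0.0])) -> array([0.])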
| 109 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ShapEImgaImgPipeline
lowerCAmelCase__ : List[str] = ["""image"""]
lowerCAmelCase__ : Any = ["""image"""]
lowerCAmelCase__ : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ : Tuple = False
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return 8
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
            resample=3,
            size=224,
        )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 2 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a__ ( __lowercase ):
def lowercase ( self : List[Any] ) -> List[str]:
lowercase : List[str] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase ( self : Dict ) -> int:
with self.assertRaises(_a ):
lowercase : Any = pa.array(TypedSequence([1, 2, 3] ), type=pa.intaa() )
def lowercase ( self : List[Any] ) -> Optional[int]:
with self.assertRaises(_a ):
lowercase : Union[str, Any] = pa.array(TypedSequence([1, 2, 3], try_type=Value('bool' ), type=Value('int64' ) ) )
def lowercase ( self : Any ) -> Any:
lowercase : Dict = pa.array(TypedSequence([1, 2, 3], type=Value('int32' ) ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase ( self : str ) -> Optional[int]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase : Optional[int] = pa.array(TypedSequence(['foo', 'bar'], type=Value('int64' ) ) )
def lowercase ( self : Any ) -> Dict:
lowercase : str = pa.array(TypedSequence([1, 2, 3], try_type=Value('int32' ) ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase ( self : Tuple ) -> int:
lowercase : Optional[int] = pa.array(TypedSequence(['foo', 'bar'], try_type=Value('int64' ) ) )
self.assertEqual(arr.type, pa.string() )
def lowercase ( self : Dict ) -> Tuple:
lowercase : Dict = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), 'int64' ) ) )
self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), 'int64' ) )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase : Tuple = pa.array(TypedSequence(['foo', 'bar'], type=ArrayaD((1, 3), 'int64' ) ) )
def lowercase ( self : Tuple ) -> Union[str, Any]:
lowercase : int = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), 'int64' ) ) )
self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), 'int64' ) )
def lowercase ( self : Optional[Any] ) -> List[str]:
lowercase : str = pa.array(TypedSequence(['foo', 'bar'], try_type=ArrayaD((1, 3), 'int64' ) ) )
self.assertEqual(arr.type, pa.string() )
@require_pil
def lowercase ( self : Any ) -> int:
import PIL.Image
lowercase : Tuple = PIL.Image.fromarray(np.arange(10, dtype=np.uinta ).reshape(2, 5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects', side_effect=_a ) as mock_cast_to_python_objects:
lowercase : Tuple = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image], type=Image() ) )
lowercase , lowercase : str = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting', _a )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
lowercase : Tuple = pa.BufferOutputStream()
lowercase : Optional[int] = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
lowercase , lowercase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase : Tuple = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase__ ( ) -> Union[str, Any]:
lowercase : int = pa.BufferOutputStream()
lowercase : Union[str, Any] = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
lowercase , lowercase : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowercase : str = pa.BufferReader(output.getvalue() )
lowercase : List[Any] = pa.ipc.open_stream(__a )
lowercase : List[Any] = f.read_all()
lowercase : Optional[Any] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def lowercase__ ( _UpperCAmelCase ) -> Dict:
lowercase : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt='split_name' , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
lowercase , lowercase : Any = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
lowercase : Optional[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt='split_name' , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=10 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=10 )
lowercase , lowercase : int = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
lowercase : Dict = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt='split_name' , check_duplicates=__a , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
lowercase , lowercase : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
lowercase : List[Any] = pa.BufferOutputStream()
lowercase : Any = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
lowercase , lowercase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase : Union[str, Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowercase : Any = pa.BufferOutputStream()
lowercase : str = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
lowercase , lowercase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase : List[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowercase : Any = pa.BufferOutputStream()
lowercase : List[Any] = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
lowercase , lowercase : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase : Optional[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase__ ( ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[str] = {'col_1': pa.string(), 'col_2': pa.intaa()}
lowercase : Optional[int] = os.path.join(__a , 'test.arrow' )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
lowercase , lowercase : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def get_base_dtype(arr_type):
    # unwrap nested list types to reach the primitive element type
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    # descend into nested lists and overwrite the first primitive element
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
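# e.g. change_first_primitive_element_in_list([[1, 2, 3]], 99) mutates the list to [[99, 2, 3]],
# which the tests below use to push a value outside the optimized dtype's range.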
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
lowercase : Union[str, Any] = pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
# in range
lowercase : Optional[Any] = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowercase : Optional[Any] = copy.deepcopy(__a )
lowercase : Union[str, Any] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
lowercase : Any = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
lowercase : Any = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
lowercase : int = 'mock://dataset-train.arrow'
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
lowercase , lowercase : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def lowercase__ ( ) -> Optional[Any]:
lowercase : List[str] = pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
lowercase , lowercase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowercase : str = pa.BufferReader(output.getvalue() )
lowercase : str = pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Any:
import PIL.Image
lowercase : List[str] = str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format='png' )
lowercase : Optional[Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({'image': Image()} ) , embed_local_files=__a ) as writer:
writer.write({'image': image_path} )
writer.finalize()
lowercase : List[Any] = pa.BufferReader(output.getvalue() )
lowercase : List[str] = pq.read_table(__a )
lowercase : Union[str, Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , __a )
with open(__a , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowercase__ ( ) -> int:
lowercase : Optional[Any] = pa.schema([pa.field('col_1' , pa.string() , nullable=__a )] )
lowercase : int = pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
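# Illustrative usage of the writer under test (a minimal sketch):
#   with ArrowWriter(stream=pa.BufferOutputStream()) as w:
#       w.write({"col_1": "foo", "col_2": 1})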
| 350 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a parquet row-group size based on the dataset's features (smaller groups for media/binary columns)."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
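# Illustrative behavior (a minimal sketch): a features dict containing an Image or Audio
# column yields the corresponding config constant, while plain scalar features yield None
# so that the writer falls back to its default batch size.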
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
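# Illustrative usage (a minimal sketch; "data.parquet" is a placeholder path):
#   ds = ParquetDatasetReader("data.parquet", split=NamedSplit("train")).read()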
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet batches to an open binary file handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
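# Illustrative usage (a minimal sketch; "out.parquet" is a placeholder path):
#   ParquetDatasetWriter(my_dataset, "out.parquet", batch_size=1000).write()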
| 53 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest whole word first, then shrink the window
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
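# e.g. with chinese_word_set = {"身高"}, tokens ["身", "高", "180"] become ["身", "##高", "180"],
# marking the subword so whole-word masking can treat 身高 as one unit.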
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 128 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list (LIFO)."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
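    # Illustrative usage (a minimal sketch):
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    print(stack)  # 2->1
    print(stack.pop())  # 2
    print(stack.peek())  # 1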
| 128 | 1 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 351 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when num_picked balls are drawn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    # linearity of expectation: E[#colours] = NUM_COLOURS * P(a given colour appears)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
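    # sanity check (stated as an assumption, not verified here): with the default
    # 20 picks out of 70 balls this evaluates to roughly 6.818741802 colours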
| 136 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 0 |
"""simple docstring"""
ks = range(2, 20 + 1)  # 10^k bucket sizes used by the solver
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10^k, whichever comes first, where the
    terms are written as a(i) = b * 10^k + c.
    Returns the change in the term (diff) and the number of terms jumped (dn).
    """
    # ds_b - digit sum of b; c - the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Same as next_term(a_i, k, i, n) but computes terms without memoizing."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Adds addend to the digit array in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def lowerCamelCase ( _UpperCamelCase : int = 1_0**1_5 ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = [1]
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = 0
while True:
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = next_term(_UpperCamelCase , 2_0 , i + dn , _UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
__UpperCAmelCase : List[Any] = 0
for j in range(len(_UpperCamelCase ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }")
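

# --- Illustrative cross-check (added; not part of the original solution) ---
# The recurrence a(1) = 1, a(i + 1) = a(i) + digitsum(a(i)) is trivial to
# brute-force for small n, which makes the memoised solver easy to validate.
def _digit_sum(x: int) -> int:
    return sum(int(d) for d in str(x))


def _brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += _digit_sum(a)
    return a


if __name__ == "__main__":
    # both approaches agree on small inputs (first terms: 1, 2, 4, 8, 16, 23, ...)
    assert _brute_force(10) == solution(10)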
| 320 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 320 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        -0.25\n'
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
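

# --- Independent sanity check (added; not part of the metric) ---
# For binary labels, the wrapped Matthews correlation reduces to a closed-form
# expression over confusion-matrix counts.
if __name__ == "__main__":
    import numpy as np

    def mcc_binary(y_true, y_pred):
        y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
        tp = int(np.sum((y_true == 1) & (y_pred == 1)))
        tn = int(np.sum((y_true == 0) & (y_pred == 0)))
        fp = int(np.sum((y_true == 0) & (y_pred == 1)))
        fn = int(np.sum((y_true == 1) & (y_pred == 0)))
        denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
        return (tp * tn - fp * fn) / denom if denom else 0.0

    print(round(mcc_binary([1, 0, 1, 1], [1, 0, 0, 1]), 3))  # 0.577, matches matthews_corrcoef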
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
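

# --- Usage sketch (added for illustration) ---
# Stage names are derived from `depths`, and unknown layer types are rejected.
if __name__ == "__main__":
    config = ResNetConfig(depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic")
    print(config.stage_names)  # ['stem', 'stage1', 'stage2']
    try:
        ResNetConfig(layer_type="dense")  # not in ["basic", "bottleneck"]
    except ValueError as err:
        print(err)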
| 322 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """simple docstring"""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 90 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
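
    # --- numpy-only cross-check (added; illustrates what the skfuzzy helpers compute) ---
    a = np.array([0.0, 0.3, 0.7, 1.0])
    b = np.array([0.2, 0.5, 0.4, 0.0])
    assert np.allclose(np.maximum(a, b), [0.2, 0.5, 0.7, 1.0])  # union
    assert np.allclose(np.minimum(a, b), [0.0, 0.3, 0.4, 0.0])  # intersection
    assert np.allclose(1 - a, [1.0, 0.7, 0.3, 0.0])  # complement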
| 65 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE: the original flag name was lost to obfuscation; restored as a best guess

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 355 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the contiguous subarray with the largest sum via divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
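
    # Worked example (added): the classic CLRS maximum-subarray instance.
    prices = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    print(max_subarray(prices, 0, len(prices) - 1))  # (7, 10, 43) -> [18, 20, -7, 12]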
| 185 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    """simple docstring"""

    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
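

# --- Usage sketch (added for illustration; not part of the original module) ---
# `single_map_nested_func` normally comes from datasets' internal map helpers;
# the stand-in below just applies the function to one contiguous slice, matching
# the 6-tuple layout built in `_map_with_multiprocessing_pool`.
def _apply_slice(args):
    function, data, _types, _index, _disable_tqdm, _desc = args
    return [function(item) for item in data]


def _square(x):
    return x * x


if __name__ == "__main__":
    result = parallel_map(_square, list(range(10)), 2, None, True, None, _apply_slice)
    print(result)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]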
| 156 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Two-color the graph with DFS; return False if two adjacent vertices share a color."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
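
# --- Contrast case (added): an odd cycle can never be two-colored.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False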
| 218 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowercase : Any = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
lowercase : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
lowercase : Any = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 351 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if 'input_ids' in labels or (isinstance(labels, list) and 'input_ids' in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
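

# --- Usage sketch (added for illustration; not part of the original module) ---
# ASR-style direction: raw audio becomes `input_values`, the transcript target
# becomes `labels`. The checkpoint name is the public SpeechT5 ASR one and is
# incidental to the wiring shown here.
if __name__ == "__main__":
    import numpy as np

    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
    speech = np.zeros(16000, dtype=np.float32)  # one second of silent dummy audio
    batch = processor(audio=speech, text_target="hello world", sampling_rate=16000)
    print(sorted(batch.keys()))  # includes input_values and labels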
| 160 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
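

# --- Minimal sketch of the lazy-import idea (added; NOT the transformers
# implementation). It shows why `_import_structure` maps submodules to the
# public names they provide: attribute access triggers the real import.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"configuration_mt5": ["MT5Config"]} -> {"MT5Config": "configuration_mt5"}
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value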
| 82 |
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """Split `data` into elements less than, equal to, and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at `index` if `items` were sorted."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
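

# --- Usage check (added): selecting the median (4th smallest of 7 values).
values = [5, 1, 9, 3, 7, 2, 8]
print(quick_select(values, len(values) // 2))  # 5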
| 53 | 0 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file',
            default=None,
            help='The accelerate config file to use for the default values in the launching script.',
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors

            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f'\t{accelerate_config}'
            )

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))

        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': f'{safetensors_version}',
            'Accelerate version': f'{accelerate_version}',
            'Accelerate config': f'{accelerate_config_str}',
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Tensorflow version (GPU?)': f'{tf_version} ({tf_cuda_available})',
            'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})',
            'Jax version': f'{jax_version}',
            'JaxLib version': f'{jaxlib_version}',
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return '\n'.join([f'- {prop}: {val}' for prop, val in d.items()]) + '\n'
| 272 |
"""simple docstring"""
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG (Kahn-style relaxation)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
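

# --- Cross-check (added): a memoised DFS gives the same answer, 5 vertices
# along 0 -> 2 -> 5 -> 6 -> 7.
from functools import lru_cache


@lru_cache(maxsize=None)
def longest_from(v):
    return 1 + max((longest_from(u) for u in graph[v]), default=0)


print(max(longest_from(v) for v in graph))  # 5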
| 272 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 53 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 173 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
return max(metric_fn(lowerCamelCase__ , lowerCamelCase__ ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : str = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowerCamelCase : Optional[Any] = []
if args.gold_data_mode == "qa":
__lowerCamelCase : Tuple = pd.read_csv(lowerCamelCase__ , sep='\t' , header=lowerCamelCase__ )
for answer_list in data[1]:
__lowerCamelCase : int = ast.literal_eval(lowerCamelCase__ )
answers.append(lowerCamelCase__ )
else:
__lowerCamelCase : Optional[Any] = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowerCamelCase : List[Any] = [[reference] for reference in references]
__lowerCamelCase : List[str] = 0
for prediction, ground_truths in zip(lowerCamelCase__ , lowerCamelCase__ ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
fa += metric_max_over_ground_truths(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Dict = 100.0 * em / total
__lowerCamelCase : Any = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Dict = args.k
__lowerCamelCase : Any = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowerCamelCase : Any = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowerCamelCase : List[Any] = 0
for hypo, reference in zip(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : List[Any] = set(hypo.split('\t' )[:k] )
__lowerCamelCase : Optional[Any] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__lowerCamelCase : List[str] = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
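# Example invocation (a sketch; the checkpoint name and data paths below are
# placeholders, not values taken from this script):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa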
| 113 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
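# A minimal usage sketch (not part of the original module); the sample count is an
# arbitrary choice and results vary between runs because the sampling is random:
#
#   pi_estimator(100_000)                        # crude pi estimate from random darts
#   area_under_line_estimator_check(100_000)     # should report an area close to 0.5
#   pi_estimator_using_area_under_curve(100_000)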
| 113 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
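# A usage sketch, assuming this module sits inside the transformers tools package
# and the vision extras are installed (the image path is a placeholder):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("path/to/photo.jpg"))
#   print(caption)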
| 44 | """simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
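# A debugging sketch (not part of the test suite): a single tester check can be run
# outside of the unittest runner, which is handy when isolating one head. `parent`
# only needs the assert* helpers, so a bare TestCase instance suffices:
#
#   tester = DebertaV2ModelTester(unittest.TestCase())
#   inputs = tester.prepare_config_and_inputs()
#   tester.create_and_check_deberta_model(*inputs)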
| 44 | 1 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
})
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)  # check for doc token related keys in dictionary.
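# A usage sketch mirroring what these tests exercise (the checkpoint id is a real
# hub model, but treat the exact kwargs as illustrative):
#
#   retriever = RagRetriever.from_pretrained(
#       "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#   )
#   # question_hidden_states: a (batch, retrieval_vector_size) float32 array
#   retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)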
| 360 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
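# A brute-force cross-check sketch (not part of the original solution). It is only
# feasible for small limits, but it confirms the digit-DP count, e.g. 120 below 1000:
#
#   def reversible_count_brute_force(limit: int) -> int:
#       count = 0
#       for n in range(1, limit):
#           if n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
#               count += 1
#       return count
#
#   assert reversible_count_brute_force(1_000) == solution(3)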
| 242 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
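# A quick sketch of the renaming on a toy state dict (the keys are hypothetical):
#
#   sd = {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1)}
#   list(convert_state_dict(sd))
#   # -> ['rwkv.embeddings.weight', 'rwkv.blocks.0.attention.time_mix_key']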
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
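# Example invocation (a sketch; the repo id and checkpoint file name below are
# placeholders, not values taken from this script):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-430m \
#       --checkpoint_file RWKV-4-Pile-430M-20220808-8066.pth \
#       --output_dir ./rwkv-430m-hf \
#       --size 430M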
| 51 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
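# A sketch of what the cleanup does on a toy toc fragment (the entries are
# hypothetical, not taken from the real _toctree.yml):
#
#   toc = [
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ]
#   clean_model_doc_toc(toc)
#   # -> [{'local': 'model_doc/albert', 'title': 'ALBERT'},
#   #     {'local': 'model_doc/bert', 'title': 'BERT'}]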
| 253 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}

                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class _UpperCAmelCase :
def __init__( self : Optional[Any] , _lowercase : List[Constraint] ):
__UpperCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__UpperCAmelCase = max([c.seqlen for c in constraints] )
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = False
self.init_state()
def a ( self : Dict ):
__UpperCAmelCase = []
__UpperCAmelCase = None
__UpperCAmelCase = [constraint.copy(stateful=_lowercase ) for constraint in self.constraints]
def a ( self : Tuple ):
__UpperCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def a ( self : Optional[Any] ):
__UpperCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__UpperCAmelCase = constraint.advance()
if isinstance(_lowercase , _lowercase ):
token_list.append(_lowercase )
elif isinstance(_lowercase , _lowercase ):
token_list.extend(_lowercase )
else:
__UpperCAmelCase = self.inprogress_constraint.advance()
if isinstance(_lowercase , _lowercase ):
token_list.append(_lowercase )
elif isinstance(_lowercase , _lowercase ):
token_list.extend(_lowercase )
if len(_lowercase ) == 0:
return None
else:
return token_list
def a ( self : Tuple , _lowercase : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__UpperCAmelCase , __UpperCAmelCase = self.add(_lowercase )
# the entire list of constraints are fulfilled
if self.completed:
break
    def add(self: Union[str, Any], token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''')
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any
            # constraint in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self: List[str], stateful: Dict = True):
        new_state = ConstraintListState(self.constraints)  # we never touch the self.constraints objects
        # themselves throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
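# Usage sketch for the constraint state machine above (assumption: `PhrasalConstraint`
# from transformers supplies the Constraint objects; the token ids are illustrative):
#   constraints = [PhrasalConstraint([5, 6, 7]), PhrasalConstraint([8, 9])]
#   state = ConstraintListState(constraints)
#   state.reset([5, 6])               # replay tokens generated so far
#   state.advance()                   # -> [7] while the first phrase is in progress
#   complete, stepped = state.add(7)  # completes the first constraint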
| 365 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : int = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__( self : Union[str, Any] , vocab_size : Dict=25_08_80 , hidden_size : str=64 , n_layer : int=2 , n_head : Union[str, Any]=8 , layer_norm_epsilon : Optional[Any]=1E-5 , initializer_range : Dict=0.02 , use_cache : Optional[int]=True , bos_token_id : Any=1 , eos_token_id : Dict=2 , apply_residual_connection_post_layernorm : Optional[Any]=False , hidden_dropout : Union[str, Any]=0.0 , attention_dropout : str=0.0 , pretraining_tp : str=1 , slow_but_exact : int=False , **kwargs : List[str] , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class _UpperCAmelCase ( OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse("1.12" )
    def __init__( self : Optional[int] , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs ( self : Optional[int] ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers ( self : Any ):
        return self._config.n_layer
    @property
    def num_attention_heads ( self : Tuple ):
        return self._config.n_head
    @property
    def atol_for_validation ( self : Dict ):
        return 1E-3
    def generate_dummy_inputs ( self : List[str] , tokenizer : "PreTrainedTokenizer" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ):
        common_inputs = super(_UpperCAmelCase , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset ( self : Any ):
        return 13
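# Shape sketch for the `past_key_values` built above: Bloom fuses the batch and head
# dimensions and stores keys transposed relative to values, so with batch=2, n_head=8,
# head_dim=64 and a past length of 10 (illustrative numbers):
#   key:   (2 * 8, 64, 10)  -> scores  = query @ key              # (16, q_len, 10)
#   value: (2 * 8, 10, 64)  -> context = softmax(scores) @ value  # (16, q_len, 64)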
| 86 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[str] = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( PretrainedConfig):
"""simple docstring"""
UpperCamelCase__ = """swinv2"""
UpperCamelCase__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__( self : int , image_size : List[str]=2_2_4 , patch_size : Optional[Any]=4 , num_channels : Dict=3 , embed_dim : int=9_6 , depths : str=[2, 2, 6, 2] , num_heads : Any=[3, 6, 1_2, 2_4] , window_size : List[Any]=7 , mlp_ratio : Tuple=4.0 , qkv_bias : List[str]=True , hidden_dropout_prob : Optional[int]=0.0 , attention_probs_dropout_prob : Tuple=0.0 , drop_path_rate : int=0.1 , hidden_act : Tuple="gelu" , use_absolute_embeddings : Dict=False , initializer_range : Union[str, Any]=0.0_2 , layer_norm_eps : Union[str, Any]=1e-5 , encoder_stride : Union[str, Any]=3_2 , **kwargs : Dict , )->Union[str, Any]:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
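# Usage sketch: with the default embed_dim 96 and depths [2, 2, 6, 2], the derived
# channel dimension after the last stage is 96 * 2**3 = 768:
#   config = _a()
#   assert config.hidden_size == 768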
| 260 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@slow
    def test_for_image_classification(self) ->Tuple:
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image , return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4)) | 243 | 0 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path : Optional[int] , config_name : List[Any] , flax_dump_folder_path : Optional[Any] ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
A__ = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A__ = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
A__ = F"""layers_{str(UpperCAmelCase_ )}"""
# Self-Attention
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
A__ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
A__ = flax_model.params["""encoder"""]["""block"""][str(UpperCAmelCase_ )]["""layer"""]
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_global_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = tax_mlp_layer_norm
A__ = flax_model_encoder_layer_block
# Only for layer 0:
A__ = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
A__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
A__ = tax_encoder_global_rel_embedding
# Assigning
A__ = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
A__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A__ = F"""layers_{str(UpperCAmelCase_ )}"""
# Self-Attention
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
A__ = tax_enc_dec_attention_module["""key"""]["""kernel"""]
A__ = tax_enc_dec_attention_module["""out"""]["""kernel"""]
A__ = tax_enc_dec_attention_module["""query"""]["""kernel"""]
A__ = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
A__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
A__ = flax_model.params["""decoder"""]["""block"""][str(UpperCAmelCase_ )]["""layer"""]
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_pre_attention_layer_norm
A__ = tax_enc_dec_attention_key
A__ = tax_enc_dec_attention_out
A__ = tax_enc_dec_attention_query
A__ = tax_enc_dec_attention_value
A__ = tax_cross_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
        A__ = tax_mlp_layer_norm
A__ = flax_model_decoder_layer_block
# Decoder Normalization
A__ = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    A__ = tax_decoder_norm
# Only for layer 0:
A__ = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
A__ = tax_decoder_rel_embedding
# Token Embeddings
A__ = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    A__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A__ = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(UpperCAmelCase_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
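    # Example invocation (paths and config name are illustrative):
    #   python convert_t5x_checkpoint_to_flax.py \
    #       --t5x_checkpoint_path /tmp/t5x_checkpoints/t5_small \
    #       --config_name google/t5-v1_1-small \
    #       --flax_dump_folder_path /tmp/flax_t5_small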
| 69 |
"""simple docstring"""
import argparse
SCREAMING_SNAKE_CASE_ : Any = 'docs/source/_static/js/custom.js'
def update_custom_js( version : str ):
    with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = F"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end of the version mapping
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"""    \"v{version}\": \"v{version}\",\n"""
    with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    update_custom_js(args.version)
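# For reference, the block in custom.js that the function above rewrites looks
# roughly like this (contents illustrative):
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "doc",
#       "v4.30.0": "v4.30.0",
#   }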
| 69 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase_ ( )-> Any:
'''simple docstring'''
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def lowerCAmelCase_ ( )-> str:
'''simple docstring'''
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase_ ( )-> str:
'''simple docstring'''
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase_ ( )-> List[str]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase_ ( )-> Optional[Any]:
'''simple docstring'''
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
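# Behaviour sketch of patch_submodule as exercised above (assumption: it matches
# datasets.utils.patching): inside the context, lookups of the dotted target through
# the patched module resolve to the mock; on exit the originals are restored.
#   with patch_submodule(_test_patching, "os.path.join", mock):
#       _test_patching.os.path.join  # -> mock
#   _test_patching.os.path.join      # -> the real os.path.join again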
| 348 | from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCAmelCase_ ( )-> int:
'''simple docstring'''
UpperCAmelCase : str ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
UpperCAmelCase : Union[str, Any] =Dataset.from_dict(__lowerCAmelCase )
return dataset
class __snake_case ( TestCase ):
    def test_make_duplicate_clusters( self ) -> Tuple:
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ) -> Tuple:
        '''simple docstring'''
        ds = get_dataset()
        ds_dedup , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_dedup ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
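# Background sketch: make_duplicate_clusters relies on MinHash-estimated Jaccard
# similarity over code tokens (assumption: a datasketch-style backend), e.g.:
#   from datasketch import MinHash
#   m = MinHash(); [m.update(tok.encode()) for tok in "a a a a".split()]
#   n = MinHash(); [n.update(tok.encode()) for tok in "a a a b".split()]
#   m.jaccard(n)  # estimated similarity, compared against the 0.85 threshold above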
| 348 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _a :
A = None
A = False
A = False
A = False
A = None
A = None
A = False
A = False
A = False
A = True
A = None
A = 1
A = None
A = False
A = None
A = None
    def copy (self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
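# Usage sketch: copies are deep, so mutating a copy never leaks back into the
# original config object:
#   cfg = _a()
#   clone = cfg.copy()
#   assert clone is not cfg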
| 82 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 82 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num: int , den: int ) -> bool:
    """simple docstring"""
    return (
        num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
    )
def fraction_list( digit_len: int ) -> list[str]:
    """simple docstring"""
    solutions = []
    den = 1_1
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 9_9:
            if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        num += 1
        den = 1_0
    return solutions
def solution( n_digits: int = 2 ) -> int:
    """simple docstring"""
    result = 1.0
    for fraction in fraction_list(n_digits ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
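# Worked example: the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98 (e.g. cancel the 9s in 49/98 to get 4/8 = 1/2).
# Their product reduces to 1/100, so the solution is 100:
#   >>> fraction_list(2)
#   ['16/64', '19/95', '26/65', '49/98']
#   >>> solution()
#   100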
| 15 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self : Dict , parent : Optional[Any] , batch_size : Optional[int]=2 , num_channels : int=3 , image_size : int=4 , patch_size : str=2 , text_seq_length : Union[str, Any]=7 , is_training : List[str]=True , use_input_mask : Dict=True , use_token_type_ids : Tuple=True , use_labels : Optional[Any]=True , vocab_size : Optional[Any]=9_9 , hidden_size : Tuple=3_6 , num_hidden_layers : Tuple=2 , num_attention_heads : Optional[int]=4 , intermediate_size : Union[str, Any]=3_7 , hidden_act : Any="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : Optional[Any]=0.1 , max_position_embeddings : List[str]=5_1_2 , type_vocab_size : int=1_6 , type_sequence_label_size : Optional[Any]=2 , initializer_range : Optional[Any]=0.02 , coordinate_size : Optional[Any]=6 , shape_size : int=6 , num_labels : str=3 , num_choices : Any=4 , scope : Optional[int]=None , range_bbox : List[str]=1_0_0_0 , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs ( self : Any ) -> Any:
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model ( self : Tuple , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ) -> int:
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'pixel_values': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification ( self : Union[str, Any] , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ) -> int:
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self : Any , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ) -> Any:
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self : List[Any] , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ) -> Any:
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self : List[str] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip ( self : Optional[Any] , pipeline_test_casse_name : Union[str, Any] , config_class : int , model_architecture : Tuple , tokenizer_name : Optional[Any] , processor_name : Optional[Any] ) -> List[str]:
        return True
    def _prepare_for_class ( self : Tuple , inputs_dict : int , model_class : Tuple , return_labels : Dict=False ) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp ( self : Optional[int] ) -> Optional[Any]:
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=3_7 )
    def test_config ( self : Tuple ) -> Dict:
        self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model ( self : Dict ) -> Tuple:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings ( self : Dict ) -> int:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification ( self : str ) -> List[str]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification ( self : int ) -> List[str]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering ( self : Tuple ) -> str:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    @slow
    def test_model_from_pretrained ( self : Union[str, Any] ) -> Tuple:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Any:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class A_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor ( self : int ) -> Dict:
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head ( self : Any ) -> List[str]:
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
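# Note on the bbox fix-up loop in TFLayoutLMvaModelTester.prepare_config_and_inputs
# above: it enforces x0 <= x1 and y0 <= y1 for every box. A vectorized numpy
# equivalent (illustrative):
#   lo = np.minimum(bbox[..., :2], bbox[..., 2:])
#   hi = np.maximum(bbox[..., :2], bbox[..., 2:])
#   bbox = np.concatenate([lo, hi], axis=-1)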
| 322 | 0 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name ):
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name , config ):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def rename_key( name ):
    if "encoder." in name:
        name = name.replace('encoder.' , '' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
    if "blocks" in name:
        name = name.replace('blocks' , 'videomae.encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name and "bias" not in name:
        name = name.replace('attn' , 'attention.self' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.attention' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "decoder_embed" in name:
        name = name.replace('decoder_embed' , 'decoder.decoder_embed' )
    if "decoder_norm" in name:
        name = name.replace('decoder_norm' , 'decoder.decoder_norm' )
    if "decoder_pred" in name:
        name = name.replace('decoder_pred' , 'decoder.decoder_pred' )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight' , 'videomae.layernorm.weight' )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias' , 'videomae.layernorm.bias' )
    if "head" in name and "decoder" not in name:
        name = name.replace('head' , 'classifier' )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith('encoder.' ):
            key = key.replace('encoder.' , '' )
        if "qkv" in key:
            key_split = key.split('.' )
            if key.startswith('decoder.blocks' ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_video( ):
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint( checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ):
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    gdown.cached_download(checkpoint_url , output , quiet=True )
    files = torch.load(output , map_location='cpu' )
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors='pt' )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
        inputs['bool_masked_pos'] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
UpperCAmelCase = torch.Size([1, 400] )
UpperCAmelCase = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
UpperCAmelCase = torch.Size([1, 174] )
UpperCAmelCase = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
UpperCAmelCase = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
UpperCAmelCase = torch.Size([1, 400] )
UpperCAmelCase = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
UpperCAmelCase = torch.Size([1, 400] )
UpperCAmelCase = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
UpperCAmelCase = torch.Size([1, 400] )
UpperCAmelCase = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
UpperCAmelCase = torch.Size([1, 400] )
UpperCAmelCase = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
UpperCAmelCase = torch.Size([1, 174] )
UpperCAmelCase = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
UpperCAmelCase = torch.Size([1, 1408, 1536] )
UpperCAmelCase = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
UpperCAmelCase = torch.Size([1, 174] )
UpperCAmelCase = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
UpperCAmelCase = outputs.loss
assert torch.allclose(lowercase_ , lowercase_ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
model.save_pretrained(lowercase_ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(lowercase_ , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
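# A quick smoke test for a converted checkpoint -- a minimal sketch, not part of the
# conversion script above; the hub id and the random 16-frame clip are assumptions.
#
#   import numpy as np
#   import torch
#   from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification
#
#   name = "MCG-NJU/videomae-base-finetuned-kinetics"
#   processor = VideoMAEImageProcessor.from_pretrained(name)
#   model = VideoMAEForVideoClassification.from_pretrained(name)
#   video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(16)]
#   inputs = processor(video, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])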
| 181 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case_ = 3
def primitive_root( p_val ):
    # function and variable names restored from the call site and `return g` below
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ):
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name , key_size ):
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print('\nWARNING:' )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main( ):
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
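# How the (key_size, g, g^d mod p, p) / (key_size, d) pair produced above is used:
# a textbook ElGamal round trip with tiny, insecure demo numbers (every constant
# here is an illustrative assumption, not an output of the generator).
def elgamal_demo():
    p, g, d = 467, 2, 127  # toy prime, base, private exponent
    e_2 = pow(g, d, p)  # public half: g^d mod p
    message = 123
    k = random.randrange(2, p - 1)  # fresh ephemeral secret per message
    c_1, c_2 = pow(g, k, p), (message * pow(e_2, k, p)) % p  # ciphertext pair
    recovered = (c_2 * pow(c_1, p - 1 - d, p)) % p  # divide out c_1^d via Fermat's little theorem
    assert recovered == message

elgamal_demo()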
| 181 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest (BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders( self ):
'''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
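# The two assertions above pin down the BERT-family packing convention:
# [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
# Minimal sketch with bert-base-uncased's usual special ids (101/102 assumed):
cls_id, sep_id = 101, 102
seq_a, seq_b = [7592, 2045], [2129, 2024]
assert [cls_id] + seq_a + [sep_id] == [101, 7592, 2045, 102]
assert [cls_id] + seq_a + [sep_id] + seq_b + [sep_id] == [101, 7592, 2045, 102, 2129, 2024, 102]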
| 275 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
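# The guarded-import pattern above, reduced to a self-contained sketch (the names
# here are placeholders, not diffusers internals): probe for the backend and fall
# back to a stub that only errors when the missing feature is actually touched.
import importlib.util

def _backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

if _backend_available("scipy"):
    import scipy  # real backend available
else:
    class _ScipyStub:
        def __getattr__(self, attr):
            raise ImportError("This scheduler needs scipy: pip install scipy")
    scipy = _ScipyStub()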
| 22 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    """simple docstring"""
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
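# What `add_start_docstrings` buys these one-line wrappers, as a simplified sketch
# (not the exact transformers implementation): the decorator prepends the delegated
# class's documentation onto the shortcut function.
def _prepend_docstring(*docstr):
    def decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return decorator

@_prepend_docstring("Shared intro copied from the delegated class.\n")
def example_shortcut():
    """Wrapper-specific notes."""

assert example_shortcut.__doc__.startswith("Shared intro")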
| 44 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A__ = logging.get_logger(__name__)
A__ = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig( PretrainedConfig ):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_0_2_6_5 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''' )
class BartOnnxConfig( OnnxSeqaSeqConfigWithPast ):
    @property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"""present.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :PreTrainedTokenizer ,__lowercase :int = -1 ,__lowercase :int = -1 ,__lowercase :bool = False ,__lowercase :Optional[TensorType] = None ,):
snake_case__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Generate decoder inputs
snake_case__ : List[Any] = seq_length if not self.use_past else 1
snake_case__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
snake_case__ : Any = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
snake_case__ : List[str] = dict(**__lowercase ,**__lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case__ , snake_case__ : Union[str, Any] = common_inputs['''input_ids'''].shape
snake_case__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
snake_case__ , snake_case__ : Dict = self.num_attention_heads
snake_case__ : List[str] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : Optional[int] = decoder_seq_length + 3
snake_case__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case__ : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase ,__lowercase )] ,dim=1 )
snake_case__ : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case__ , snake_case__ : List[Any] = self.num_layers
snake_case__ : List[Any] = min(__lowercase ,__lowercase )
snake_case__ : Dict = max(__lowercase ,__lowercase ) - min_num_layers
snake_case__ : Union[str, Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
) )
# TODO: test this.
snake_case__ : Optional[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowercase ,__lowercase ):
common_inputs["past_key_values"].append((torch.zeros(__lowercase ), torch.zeros(__lowercase )) )
return common_inputs
def __lowerCamelCase ( self :List[Any] ,__lowercase :PreTrainedTokenizer ,__lowercase :int = -1 ,__lowercase :int = -1 ,__lowercase :bool = False ,__lowercase :Optional[TensorType] = None ,):
snake_case__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case__ , snake_case__ : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case__ : Dict = seqlen + 2
snake_case__ , snake_case__ : Tuple = self.num_layers
snake_case__ , snake_case__ : List[str] = self.num_attention_heads
snake_case__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : int = common_inputs['''attention_mask'''].dtype
snake_case__ : int = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowercase ,__lowercase ,dtype=__lowercase )] ,dim=1 )
snake_case__ : Union[str, Any] = [
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(__lowercase )
]
return common_inputs
def __lowerCamelCase ( self :str ,__lowercase :PreTrainedTokenizer ,__lowercase :int = -1 ,__lowercase :int = -1 ,__lowercase :bool = False ,__lowercase :Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case__ : Optional[int] = compute_effective_axis_dimension(
__lowercase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case__ : str = tokenizer.num_special_tokens_to_add(__lowercase )
snake_case__ : int = compute_effective_axis_dimension(
__lowercase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
snake_case__ : Union[str, Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case__ : Optional[Any] = dict(tokenizer(__lowercase ,return_tensors=__lowercase ) )
return common_inputs
def __lowerCamelCase ( self :int ,__lowercase :PreTrainedTokenizer ,__lowercase :int = -1 ,__lowercase :int = -1 ,__lowercase :bool = False ,__lowercase :Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowercase ,batch_size=__lowercase ,seq_length=__lowercase ,is_pair=__lowercase ,framework=__lowercase )
elif self.task == "causal-lm":
snake_case__ : int = self._generate_dummy_inputs_for_causal_lm(
__lowercase ,batch_size=__lowercase ,seq_length=__lowercase ,is_pair=__lowercase ,framework=__lowercase )
else:
snake_case__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowercase ,batch_size=__lowercase ,seq_length=__lowercase ,is_pair=__lowercase ,framework=__lowercase )
return common_inputs
def __lowerCamelCase ( self :Tuple ,__lowercase :Optional[int] ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :Tuple ):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Optional[Any] = super()._flatten_past_key_values_(__lowercase ,__lowercase ,__lowercase ,__lowercase )
else:
snake_case__ : int = super(__lowercase ,self )._flatten_past_key_values_(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
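# The dynamic-axis trick used by the dummy-input generators above, as a standalone
# sketch (simplified from OnnxConfig; exact transformers defaults may differ): when
# an axis is dynamic (-1), substitute a small fixed size so the ONNX exporter cannot
# specialize the exported graph to one observed shape.
def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # -1 marks a dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert effective_axis_dimension(-1, fixed_dimension=2) == 2  # dynamic batch -> 2 samples
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6  # leave room for special tokens
assert effective_axis_dimension(16, fixed_dimension=2) == 16  # an explicit size wins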
| 44 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
__lowerCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__lowerCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
__lowerCAmelCase = F"""layers_{str(lowerCAmelCase_ )}"""
# Self-Attention
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__lowerCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__lowerCAmelCase = flax_model.params['encoder']['block'][str(lowerCAmelCase_ )]['layer']
__lowerCAmelCase = tax_attention_key
__lowerCAmelCase = tax_attention_out
__lowerCAmelCase = tax_attention_query
__lowerCAmelCase = tax_attention_value
__lowerCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_global_layer_norm
if split_mlp_wi:
__lowerCAmelCase = tax_mlp_wi_a
__lowerCAmelCase = tax_mlp_wi_a
else:
__lowerCAmelCase = tax_mlp_wi
__lowerCAmelCase = tax_mlp_wo
__lowerCAmelCase = tax_mlp_layer_norm
__lowerCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
__lowerCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
__lowerCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
__lowerCAmelCase = tax_encoder_global_rel_embedding
# Assigning
__lowerCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
__lowerCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__lowerCAmelCase = F"""layers_{str(lowerCAmelCase_ )}"""
# Self-Attention
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
__lowerCAmelCase = tax_enc_dec_attention_module['key']['kernel']
__lowerCAmelCase = tax_enc_dec_attention_module['out']['kernel']
__lowerCAmelCase = tax_enc_dec_attention_module['query']['kernel']
__lowerCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__lowerCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__lowerCAmelCase = flax_model.params['decoder']['block'][str(lowerCAmelCase_ )]['layer']
__lowerCAmelCase = tax_attention_key
__lowerCAmelCase = tax_attention_out
__lowerCAmelCase = tax_attention_query
__lowerCAmelCase = tax_attention_value
__lowerCAmelCase = tax_pre_attention_layer_norm
__lowerCAmelCase = tax_enc_dec_attention_key
__lowerCAmelCase = tax_enc_dec_attention_out
__lowerCAmelCase = tax_enc_dec_attention_query
__lowerCAmelCase = tax_enc_dec_attention_value
__lowerCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
__lowerCAmelCase = tax_mlp_wi_a
__lowerCAmelCase = tax_mlp_wi_a
else:
__lowerCAmelCase = tax_mlp_wi
__lowerCAmelCase = tax_mlp_wo
__lowerCAmelCase = txa_mlp_layer_norm
__lowerCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
__lowerCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale']
__lowerCAmelCase = txa_decoder_norm
# Only for layer 0:
__lowerCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
__lowerCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
__lowerCAmelCase = tax_model['target']['token_embedder']['embedding']
__lowerCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__lowerCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
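# Why the converter branches on `wi_0`: T5 v1.1 / LongT5 checkpoints use a *gated*
# feed-forward with two input projections (wi_0, wi_1) where original T5 has a single
# `wi`. A numpy sketch of the gated variant (shapes and the tanh GELU approximation
# are illustrative, not taken from the checkpoint format):
import numpy as np

def gated_ffn(x, wi_0, wi_1, wo):
    gelu = lambda v: 0.5 * v * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (v + 0.044715 * v**3)))
    return (gelu(x @ wi_0) * (x @ wi_1)) @ wo  # gate * linear, then project back

_x = np.random.randn(2, 4)
_out = gated_ffn(_x, np.random.randn(4, 8), np.random.randn(4, 8), np.random.randn(8, 4))
assert _out.shape == (2, 4)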
| 284 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case : Dict = pytest.mark.integration
@require_faiss
class IndexableDatasetTest ( TestCase ):
"""simple docstring"""
    def _create_dummy_dataset( self ):
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(3_0 ).tolist()]} )
return dset
    def test_add_faiss_index( self ):
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
import faiss
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
    def test_add_elasticsearch_index( self ):
from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 3_0 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest ( TestCase ):
"""simple docstring"""
    def test_flat_ip( self ):
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.floataa )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory( self ):
import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom( self ):
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization( self ):
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.floataa ) )
    index_name = 'index.faiss'
    path = F"""mock://{index_name}"""
    index.save(path, storage_options=mockfs.storage_options )
    index = FaissIndex.load(path, storage_options=mockfs.storage_options )
    query = np.zeros(5, dtype=np.floataa )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest ( TestCase ):
"""simple docstring"""
    def test_elasticsearch( self ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=3_0 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=3_0 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
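# The raw FAISS flow the tests above wrap, outside the Dataset/mock machinery
# (a minimal sketch; requires `pip install faiss-cpu`):
import numpy as np
import faiss

_index = faiss.IndexFlatIP(5)  # exact inner-product index over 5-dim vectors
_index.add(np.eye(5, dtype=np.float32))  # five one-hot "documents"
_query = np.zeros((1, 5), dtype=np.float32)
_query[0, 1] = 1.0
_scores, _ids = _index.search(_query, 1)  # raw FAISS expects a batch of queries
assert _ids[0, 0] == 1 and _scores[0, 0] > 0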
| 284 | 1 |
def check_cycle(graph: dict ) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    """Return True if a back edge is reachable from ``vertex``."""
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
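# Quick illustration on two small adjacency lists (values chosen for this sketch):
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True  # 0 -> 1 -> 2 -> 0
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False  # a DAG has no cycle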
if __name__ == "__main__":
from doctest import testmod
testmod()
| 278 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor( metaclass=DummyObject ):  # class names assumed per transformers' dummy_speech_objects
    _backends = ['speech']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['speech'] )
class Speech2TextFeatureExtractor( metaclass=DummyObject ):
    _backends = ['speech']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['speech'] )
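# The mechanism behind these placeholders, as a self-contained sketch (simplified;
# transformers' real requires_backends also carries per-backend pip hints): importing
# the stub always succeeds, and a readable error fires only at use time.
import importlib.util

def _requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the missing backend(s): {', '.join(missing)}")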
| 278 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix : Matrix , vector : Matrix ) -> Matrix:
    """
    Solve the linear system A x = b by Gaussian elimination with partial pivoting.
    """
    size : int = len(matrix )
    augmented : Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row : int
    rowa : int
    col : int
    cola : int
    pivot_row : int
    ratio : float

    # build the augmented matrix [A | b]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate( y_points : list[int] ) -> Callable[[int], int]:
    """
    Fit a polynomial through (1, y_points[0]), (2, y_points[1]), ... and return it
    as a callable.
    """
    size : int = len(y_points )
    matrix : Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector : Matrix = [[0] for _ in range(size )]
    coeffs : Matrix
    x_val : int
    y_val : int
    col : int

    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix , vector )

    def interpolated_func(var : int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func
def question_function( variable : int ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution( func : Callable[[int], int] = question_function , order : int = 10 ) -> int:
    """
    Sum the first incorrect terms of the optimum polynomials fitted to the first
    1..order values of ``func``.
    """
    data_points : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret : int = 0
    poly : Callable[[int], int]
    x_val : int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(F'{solution() = }')
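# Worked check of the machinery above: a quadratic fitted through the first three
# cubes (1, 8, 27) reproduces them exactly, and its first incorrect term appears at
# n = 4, where it predicts 58 instead of 64 -- the classic Project Euler 101 example.
_quadratic = interpolate([1, 8, 27])
assert [_quadratic(n) for n in (1, 2, 3)] == [1, 8, 27]
assert _quadratic(4) == 58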
| 254 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_UpperCamelCase = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_UpperCamelCase = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def __A ( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase="binary" , __UpperCAmelCase=None ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = fa_score(
__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase )
return {"f1": float(__UpperCAmelCase ) if score.size == 1 else score}
| 254 | 1 |
def matching_min_vertex_cover(graph: dict ) -> set:
    """Greedy matching heuristic: repeatedly take an edge and add both endpoints."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still edges left, take an arbitrary edge (from_node, to_node),
    # add both of its endpoints to chosen_vertices, and then remove every edge
    # adjacent to from_node or to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph: dict ) -> set:
    """Return the set of edges (from_node, to_node) of the adjacency dict."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 364 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
'''simple docstring'''
UpperCamelCase : List[Any] = "this is a test"
UpperCamelCase : List[Any] = "this is a test"
return input_text, output_text
    def test_convert_token_and_id( self ):
'''simple docstring'''
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "▁eloquent" )
        self.assertEqual(len(vocab_keys ) , 3_0000 )
    def test_vocab_size( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
    def test_rust_and_python_full_tokenizers( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1289] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
    def test_sequence_builders( self ):
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 140 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Run a forward pass on google/mt5-small and check the score of a fixed label sequence."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
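

# For context on the check above: if `loss` is the mean cross-entropy over the T label tokens,
# then -(T * loss) is the summed log-likelihood of the label sequence. A minimal sketch of that
# identity (random logits, purely illustrative -- not part of the original test):
import torch

log_probs = torch.log_softmax(torch.randn(1, 4, 10), dim=-1)  # (batch, T, vocab)
labels = torch.randint(0, 10, (1, 4))
token_ll = log_probs.gather(-1, labels.unsqueeze(-1)).squeeze(-1)  # per-token log-likelihood
mean_ce = -token_ll.mean()  # what a seq2seq model typically returns as `loss`
assert torch.allclose(-(labels.shape[-1] * mean_ce), token_ll.sum())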
| 170 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a SpeechT5 feature extractor: raw waveform inputs for the encoder and log-mel
    spectrogram targets for the decoder.
    """

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
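

# A minimal usage sketch for the extractor above (random noise standing in for real 16 kHz
# speech; shapes are illustrative):
import numpy as np

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake 16 kHz audio

# raw waveform inputs for the encoder
inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)  # (1, 16000)

# log-mel spectrogram targets for the decoder
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(targets["input_values"].shape)  # (1, num_frames, 80)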
| 315 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the models' outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generators' outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 367 |
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor: extracts a list of text nodes and their corresponding
    xpaths from HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Input type checking for clearer error
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
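

# A quick usage sketch (hypothetical HTML; requires bs4 to be installed). Expected output,
# roughly: nodes [['Title', 'Hello', 'world']] and xpaths like '/html/body/h1'.
extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello <b>world</b></p></body></html>"
encoding = extractor(html_string)
print(encoding["nodes"])
print(encoding["xpaths"])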
| 315 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """
    Segment tree over a sequence, supporting point updates and range queries with an
    arbitrary associative function (e.g. operator.add, max, min).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update a single element in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the range [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes of the tree."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 64 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
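

# A minimal usage sketch through the high-level pipeline factory (checkpoint name is
# illustrative and requires a download; exact scores vary by model):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for prediction in unmasker("The capital of France is <mask>.", top_k=3):
    print(prediction["token_str"], round(prediction["score"], 3))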
| 153 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
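

# A sketch of what the lazy structure above buys: importing transformers stays cheap, and the
# heavy submodules only load on first attribute access (checkpoint name shown for illustration):
#
# from transformers import BlipForConditionalGeneration, BlipProcessor  # resolved via _LazyModule
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")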
| 364 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring case and spaces).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character, increment the count for the first string
    # and decrement it for the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
input_a = input("Enter the first string ").strip()
input_b = input("Enter the second string ").strip()
status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 47 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Generates primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number.

    >>> solution(6)
    13
    >>> solution(1)
    2
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 71 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
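

# A hypothetical invocation (the script name and all paths below are placeholders, not from
# the original file):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/xlnet_model.ckpt \
#       --xlnet_config_file /tmp/xlnet_config.json \
#       --pytorch_dump_folder_path /tmp/xlnet_pytorch \
#       --finetuning_task sst-2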
| 351 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
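

# These thin wrappers make the repository usable as a torch.hub entry point. A usage sketch
# (network access required; checkpoint name is illustrative):
import torch

hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
hub_model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
print(hub_model.config.hidden_size)  # 768 for bert-base-uncased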
| 186 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_UpperCAmelCase : str = "src/transformers"
# Matches is_xxx_available()
_UpperCAmelCase : Union[str, Any] = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_UpperCAmelCase : Tuple = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_UpperCAmelCase : int = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_UpperCAmelCase : Optional[int] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_UpperCAmelCase : List[str] = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_UpperCAmelCase : List[Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_UpperCAmelCase : int = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_UpperCAmelCase : Union[str, Any] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_UpperCAmelCase : Dict = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_UpperCAmelCase : List[Any] = re.compile(r"^\s*try:")
# Catches a line with else:
_UpperCAmelCase : Optional[int] = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the
    TYPE_CHECKING objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same
    objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 236 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt'
UpperCAmelCase__ = '"text": ["foo", "foo"]'
UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 288 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hwc -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
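# Added: a minimal usage sketch for the box helpers above. The single box, the
# per-image (scale_y, scale_x) values, and the (height, width) box size are
# made-up illustration data, not values from any real model output.
def _demo_box_postprocessing():
    boxes = torch.tensor([[10.0, 20.0, 600.0, 900.0]])
    scales_yx = torch.tensor([[0.5, 0.5]])  # (scale_y, scale_x) per image
    boxes = _scale_box(boxes, scales_yx)    # map boxes back to raw image scale
    _clip_box(boxes, (480, 640))            # clamp in place to the raw image bounds
    return boxes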
| 100 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"
    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
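# Added: a quick sanity-check sketch for the config above; the overridden
# vocab_size is an arbitrary illustrative value.
def _demo_biogpt_config():
    config = BioGptConfig(vocab_size=10000)
    assert config.vocab_size == 10000
    assert config.model_type == "biogpt"
    return config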
| 100 | 1 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
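# Added: minimal usage of the activation lookup above.
def _demo_get_activation():
    assert isinstance(get_activation("silu"), nn.SiLU)
    assert isinstance(get_activation("gelu"), nn.GELU)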
| 339 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    """simple docstring"""

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
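# Added: a small sketch of the feed_forward_proj parsing above; the tiny
# d_model/num_layers values are arbitrary, chosen only to keep the example light.
def _demo_t5_config():
    config = T5Config(d_model=64, num_layers=2, feed_forward_proj="gated-gelu")
    assert config.is_gated_act
    assert config.dense_act_fn == "gelu_new"  # backwards-compatibility remapping
    return config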
| 186 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
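# Added: a minimal round-trip sketch for the character tokenizer above. The
# four-entry vocab written to a temp file is illustrative, not the real
# mgp-str vocabulary, and direct construction is only a quick smoke test.
def _demo_mgpstr_tokenizer():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"[GO]": 0, "a": 1, "b": 2, "[s]": 3}, f)
        path = f.name
    tokenizer = MgpstrTokenizer(path)
    tokens = tokenizer._tokenize("ab")  # character-level split -> ["a", "b"]
    ids = [tokenizer._convert_token_to_id(t) for t in tokens]
    return tokens, ids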
| 367 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
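# Added: a short note on the lazy-import pattern used above. Attribute access
# is what triggers the real submodule import; any timing difference depends on
# the environment, so none is claimed here.
#
#   import transformers.models.instructblip as instructblip  # cheap: heavy deps not imported yet
#   config_cls = instructblip.InstructBlipConfig             # first access resolves the submodule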
| 226 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 55 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
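    # Added: example invocation; the script filename below is assumed, not taken
    # from this file, and the output directory is an arbitrary placeholder.
    #
    #   python convert_swin_timm_to_pytorch.py \
    #       --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-converted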
| 296 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)})
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 362 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
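# Added: quick illustrative checks for the three variants above.
def _demo_pangram():
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")  # only 7 distinct letters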
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 156 | 0 |
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
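# Added: a worked example of the pipeline above with made-up toy data.
def _demo_tf_idf():
    tf = term_frequency("to", "To be, or not to be")  # -> 2
    df, n = 2, 10                                     # assume "to" appears in 2 of 10 docs
    idf = inverse_document_frequency(df, n)           # log10(10 / 2) = 0.699
    return tf_idf(tf, idf)                            # 2 * 0.699 = 1.398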
| 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 | 1 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
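# Added: a sketch of how these criteria are typically wired into generation.
# The model and input_ids are placeholders; passing a `stopping_criteria` list
# to `generate` is the pattern these tests exercise.
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32),
#                                    MaxTimeCriteria(max_time=2.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)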
| 21 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
try:
hide_cursor()
yield
finally:
show_cursor()
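# Added: minimal usage of the context manager above; the sleep is just an
# illustrative stand-in for long-running terminal work.
if __name__ == "__main__":
    import time
    with hide():
        time.sleep(1)  # cursor is hidden here and restored on exit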
| 21 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2))
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int)
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int)
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials")
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower())
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower())
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode)
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2)
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default")
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1)
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision)
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file)
| 192 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 192 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    '''simple docstring'''

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) ,reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division)
        return {"recall": float(score) if score.size == 1 else score}
| 15 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
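# Added: a tiny sanity sketch of the cosine schedule above; the 10-step count
# is arbitrary, chosen only to keep the illustration cheap.
def _demo_cosine_betas():
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert float(betas.max()) <= 0.999  # capped by max_beta
    return betas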
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2"):
        """simple docstring"""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """simple docstring"""
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
def lowercase_ (self : Optional[int] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase__ , UpperCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 )
else:
UpperCAmelCase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase__ = t - 1
UpperCAmelCase__ = self.alphas_cumprod[t]
UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ = 1 - alpha_prod_t
UpperCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ = self.betas[t]
UpperCAmelCase__ = self.alphas[t]
else:
UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ = torch.clamp(
__UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase__ = 0
if t > 0:
UpperCAmelCase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device )
UpperCAmelCase__ = self._get_variance(
__UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase__ = variance
elif self.variance_type == "learned_range":
UpperCAmelCase__ = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
UpperCAmelCase__ = variance * variance_noise
UpperCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase__ = timesteps.to(original_samples.device )
UpperCAmelCase__ = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
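# Added sketch: `add_noise` above is the closed-form forward-diffusion step
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise. Assuming this
# class is the UnCLIP scheduler it mirrors, usage would look roughly like:
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#     t = torch.randint(0, 1000, (x0.shape[0],))
#     x_t = scheduler.add_noise(x0, torch.randn_like(x0), t)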
| 65 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
A__ : str ='''docs/source/en/_toctree.yml'''
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
    _lowerCAmelCase = defaultdict(int )
_lowerCAmelCase = []
_lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(lowerCAmelCase )
_lowerCAmelCase = new_doc_list
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(lowerCAmelCase ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    _lowerCAmelCase = sorted(lowerCAmelCase , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(lowerCAmelCase )
# Sort
return overview_doc
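# Added sketch: on a toy input, the cleaner above (named `clean_doc_toc` in the
# upstream repo this mirrors) deduplicates entries by their "local" key, sorts
# by title, and pins the "Overview" entry first:
#
#     clean_doc_toc([
#         {"local": "b", "title": "Zeta"},
#         {"local": "a", "title": "Overview"},
#         {"local": "b", "title": "Zeta"},
#     ])
#     # -> [{"local": "a", "title": "Overview"}, {"local": "b", "title": "Zeta"}]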
def UpperCamelCase__ ( lowerCAmelCase=False ):
"""simple docstring"""
with open(lowerCAmelCase , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase = api_doc[scheduler_idx]["""sections"""]
_lowerCAmelCase = clean_doc_toc(lowerCAmelCase )
_lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowerCAmelCase , allow_unicode=lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def UpperCamelCase__ ( lowerCAmelCase=False ):
"""simple docstring"""
with open(lowerCAmelCase , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase = False
_lowerCAmelCase = api_doc[pipeline_idx]["""sections"""]
_lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase = pipeline_doc["""section"""]
_lowerCAmelCase = clean_doc_toc(lowerCAmelCase )
if overwrite:
_lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCAmelCase )
# sort overall pipeline doc
_lowerCAmelCase = clean_doc_toc(lowerCAmelCase )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowerCAmelCase , allow_unicode=lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A__ : str =argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A__ : Tuple =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
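# Added usage note (assumed invocation, matching the argparse flag above):
#     python utils/check_doc_toc.py                      # verify only
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place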
| 220 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
A__ : List[str] ={
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Dict = '''facebook/nllb-200-distilled-600M'''
_lowercase: int = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
_lowercase: Any = '''translator'''
_lowercase: Optional[int] = AutoTokenizer
_lowercase: str = AutoModelForSeqaSeqLM
_lowercase: List[Any] = LANGUAGE_CODES
_lowercase: Tuple = ['''text''', '''text''', '''text''']
_lowercase: List[str] = ['''text''']
def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> Optional[Any]:
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language." )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__snake_case , return_tensors="""pt""" , src_lang=__snake_case , tgt_lang=__snake_case )
def lowercase__ ( self : Optional[int] , __snake_case : Any ) -> List[str]:
return self.model.generate(**__snake_case )
def lowercase__ ( self : Dict , __snake_case : List[Any] ) -> Any:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__snake_case )
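# Added sketch: assuming the usual PipelineTool flow (encode -> forward ->
# decode) and a hypothetical concrete class name, the translator above would
# be exercised roughly as:
#
#     tool = TranslationTool()  # hypothetical name for the class above
#     enc = tool.encode("Hello, how are you?", "English", "French")
#     out = tool.forward(enc)
#     print(tool.decode(out))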
| 220 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> tuple[float, float, float]:
UpperCAmelCase : Optional[int] = point_y / 4 / point_x
UpperCAmelCase : str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCAmelCase : Any = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCAmelCase : Union[str, Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCAmelCase : Union[str, Any] = outgoing_gradient**2 + 4
UpperCAmelCase : Dict = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCAmelCase : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCAmelCase : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCAmelCase : Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCAmelCase : Optional[Any] = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
UpperCAmelCase : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
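# Added note: the reflection above follows from the ellipse 4x^2 + y^2 = 100,
# whose tangent slope at (x, y) is -4x/y, so the normal slope is y/(4x) as
# computed. `sa` and `ca` are sin(2*theta) and cos(2*theta) of the normal's
# angle theta via the tan half-angle identities, and the outgoing gradient is
# tan(2*theta - phi), i.e. the incoming direction reflected across the normal.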
def snake_case_ ( _lowerCAmelCase : float = 1.4 , _lowerCAmelCase : float = -9.6 ) -> int:
UpperCAmelCase : int = 0
UpperCAmelCase : float = first_x_coord
UpperCAmelCase : float = first_y_coord
    UpperCAmelCase : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 23 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __a :
'''simple docstring'''
def __init__( self , _a , _a=3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE__ : Optional[int] = scope
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> str:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_a , )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = FalconModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a )
SCREAMING_SNAKE_CASE__ : Any = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Dict = FalconModel(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
SCREAMING_SNAKE_CASE__ : List[str] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
SCREAMING_SNAKE_CASE__ : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Tuple = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
SCREAMING_SNAKE_CASE__ : Tuple = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
        ) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :Any = (FalconForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :List[Any] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :List[str] = False
_SCREAMING_SNAKE_CASE :str = False
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = FalconModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_a , hidden_size=37 )
def _a ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE__ : str = alibi
self.model_tester.create_and_check_model(_a , *_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ : str = input_ids.ne(1 ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Optional[Any] = """single_label_classification"""
SCREAMING_SNAKE_CASE__ : List[str] = input_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Tuple = input_ids.ne(1 ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ : int = FalconForCausalLM(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(_a , use_cache=_a )
SCREAMING_SNAKE_CASE__ : str = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values )
SCREAMING_SNAKE_CASE__ : Any = model._convert_cache_to_standard_format(_a , _a )
for layer in range(len(_a ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : str = """multi_label_classification"""
SCREAMING_SNAKE_CASE__ : int = input_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(1 ).to(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : int = FalconForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> List[str]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_a , """use_cache""" ):
return
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_a ).to(_a )
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Tuple = model(**_a )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE__ : Dict = (
getattr(_a , """decoder_layers""" , _a )
or getattr(_a , """num_decoder_layers""" , _a )
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE__ : Any = getattr(_a , """num_kv_heads""" , config.num_attention_heads )
SCREAMING_SNAKE_CASE__ : str = getattr(_a , """d_model""" , config.hidden_size )
SCREAMING_SNAKE_CASE__ : str = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = outputs["""past_key_values"""]
self.assertEqual(len(_a ) , _a )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = inputs["""input_ids"""].shape
for i in range(_a ):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE__ : Any = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __a (unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
SCREAMING_SNAKE_CASE__ : Tuple = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(_a )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
SCREAMING_SNAKE_CASE__ : Any = model.generate(**_a , do_sample=_a , max_new_tokens=19 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(_a )[0]
self.assertEqual(_a , _a )
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM.from_pretrained(_a )
model.eval()
model.to(_a )
SCREAMING_SNAKE_CASE__ : str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_a , do_sample=_a , max_new_tokens=4 )
model.generate(**_a , do_sample=_a , max_new_tokens=4 )
model.generate(**_a , num_beams=2 , max_new_tokens=4 )
@slow
def _a ( self ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : int = FalconForCausalLM.from_pretrained(_a )
model.eval()
model.to(device=_a )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
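# Added sketch: the slow tests above reduce to the standard generate loop; a
# minimal standalone equivalent using the same checkpoint would be:
#
#     tok = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
#     model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b").eval()
#     inputs = tok("My favorite food is", return_tensors="pt")
#     out = model.generate(**inputs, do_sample=False, max_new_tokens=19)
#     print(tok.batch_decode(out)[0])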
| 132 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Optional[Any] = CLIPConfig
lowerCAmelCase__ : Dict = ['CLIPEncoderLayer']
def __init__( self ,__UpperCAmelCase ) -> Optional[int]:
super().__init__(__UpperCAmelCase )
A__ = CLIPVisionModelWithProjection(config.vision_config )
A__ = nn.Linear(config.vision_config.projection_dim ,1 )
A__ = nn.Linear(config.vision_config.projection_dim ,1 )
@torch.no_grad()
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=0.5 ,__UpperCAmelCase=0.5 ) -> Optional[Any]:
A__ = self.vision_model(__UpperCAmelCase )[0]
A__ = self.p_head(__UpperCAmelCase )
A__ = nsfw_detected.flatten()
A__ = nsfw_detected > p_threshold
A__ = nsfw_detected.tolist()
if any(__UpperCAmelCase ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(__UpperCAmelCase ):
if nsfw_detected_:
A__ = np.zeros(images[idx].shape )
A__ = self.w_head(__UpperCAmelCase )
A__ = watermark_detected.flatten()
A__ = watermark_detected > w_threshold
A__ = watermark_detected.tolist()
if any(__UpperCAmelCase ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(__UpperCAmelCase ):
if watermark_detected_:
A__ = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
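# Added sketch: the checker above feeds its first argument through the CLIP
# vision tower and blacks out flagged entries of the second. With placeholder
# names (both are assumptions, not the original signature):
#
#     images, nsfw_detected, watermark_detected = safety_checker(clip_input, np_images)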
| 154 | """simple docstring"""
import os
def UpperCAmelCase ( ):
"""simple docstring"""
with open(os.path.dirname(UpperCamelCase__ ) + '/grid.txt' ) as f:
A__ = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
A__ = 0
# right
for i in range(20 ):
for j in range(17 ):
A__ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
A__ = temp
# down
for i in range(17 ):
for j in range(20 ):
A__ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
A__ = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
A__ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
A__ = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
A__ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
A__ = temp
return maximum
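# Added note: this is the classic 20x20 grid problem: the four loop nests scan
# every horizontal, vertical and diagonal run of four adjacent cells. The
# ranges stop at 17 (and start at 3 for the anti-diagonal) so each 4-cell
# window stays inside the grid.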
if __name__ == "__main__":
print(solution())
| 154 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'data2vec-vision'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=224 , _a=16 , _a=3 , _a=False , _a=False , _a=False , _a=False , _a=0.1 , _a=0.1 , _a=True , _a=[3, 5, 7, 11] , _a=[1, 2, 3, 6] , _a=True , _a=0.4 , _a=256 , _a=1 , _a=False , _a=255 , **_a , ):
super().__init__(**_a )
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : Union[str, Any] = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : int = intermediate_size
__magic_name__ : Tuple = hidden_act
__magic_name__ : str = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : str = initializer_range
__magic_name__ : Optional[Any] = layer_norm_eps
__magic_name__ : Tuple = image_size
__magic_name__ : str = patch_size
__magic_name__ : Tuple = num_channels
__magic_name__ : Optional[Any] = use_mask_token
__magic_name__ : Any = use_absolute_position_embeddings
__magic_name__ : Union[str, Any] = use_relative_position_bias
__magic_name__ : str = use_shared_relative_position_bias
__magic_name__ : Tuple = layer_scale_init_value
__magic_name__ : List[str] = drop_path_rate
__magic_name__ : Any = use_mean_pooling
# decode head attributes (semantic segmentation)
__magic_name__ : Tuple = out_indices
__magic_name__ : int = pool_scales
# auxiliary head attributes (semantic segmentation)
__magic_name__ : str = use_auxiliary_head
__magic_name__ : Union[str, Any] = auxiliary_loss_weight
__magic_name__ : Optional[Any] = auxiliary_channels
__magic_name__ : Dict = auxiliary_num_convs
__magic_name__ : Optional[int] = auxiliary_concat_input
__magic_name__ : Dict = semantic_loss_ignore_index
class _snake_case ( snake_case ):
UpperCamelCase__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ):
return 1e-4
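# Added sketch (heavily assumed): pairing the OnnxConfig above with a model via
# the era's `transformers.onnx.export` helper; the class name below is the
# upstream one this obfuscated class mirrors:
#
#     from pathlib import Path
#     from transformers.onnx import export
#
#     onnx_config = Data2VecVisionOnnxConfig(model.config)
#     export(image_processor, model, onnx_config,
#            onnx_config.default_onnx_opset, Path("model.onnx"))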
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_a , **_a ):
pass
def lowerCAmelCase_ ( _snake_case : Image ) -> str:
'''simple docstring'''
    __magic_name__ : Optional[int] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : Image ) -> Dict:
'''simple docstring'''
__magic_name__ : List[Any] = np.array(_snake_case )
__magic_name__ : Optional[int] = npimg.shape
return {"hash": hashimage(_snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
UpperCamelCase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
__magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Dict = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = "facebook/sam-vit-huge"
__magic_name__ : str = pipeline("mask-generation" , model=_a )
__magic_name__ : Tuple = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 281 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[int] =["image_processor", "tokenizer"]
__A : List[Any] ="LayoutLMv2ImageProcessor"
__A : List[str] =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self ,_snake_case=None ,_snake_case=None ,**_snake_case ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,_snake_case ,)
UpperCAmelCase_ : List[str] = kwargs.pop("feature_extractor" )
UpperCAmelCase_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_snake_case ,_snake_case )
def __call__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = True ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = 0 ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = False ,_snake_case = False ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
UpperCAmelCase_ : str = self.image_processor(images=_snake_case ,return_tensors=_snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Optional[Any] = features["words"]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
text=text if text is not None else features["words"] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["boxes"] ,word_labels=_snake_case ,add_special_tokens=_snake_case ,padding=_snake_case ,truncation=_snake_case ,max_length=_snake_case ,stride=_snake_case ,pad_to_multiple_of=_snake_case ,return_token_type_ids=_snake_case ,return_attention_mask=_snake_case ,return_overflowing_tokens=_snake_case ,return_special_tokens_mask=_snake_case ,return_offsets_mapping=_snake_case ,return_length=_snake_case ,verbose=_snake_case ,return_tensors=_snake_case ,**_snake_case ,)
# add pixel values
UpperCAmelCase_ : Tuple = features.pop("pixel_values" )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[Any] = self.get_overflowing_images(_snake_case ,encoded_inputs["overflow_to_sample_mapping"] )
UpperCAmelCase_ : Optional[Any] = images
return encoded_inputs
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : Any = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(_snake_case )} and {len(_snake_case )}''' )
return images_with_overflow
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCamelCase__ ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,_snake_case ,)
return self.image_processor_class
@property
def UpperCamelCase__ ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,_snake_case ,)
return self.image_processor
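# Added sketch: typical (assumed) end-to-end use of the processor above, with
# OCR handled inside the LayoutLMv2 image processor:
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")
#     # encoding carries input_ids, bbox, attention_mask and image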
| 67 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _snake_case (__SCREAMING_SNAKE_CASE):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = tempfile.mkdtemp()
UpperCAmelCase_ : Optional[int] = 8
# DPR tok
UpperCAmelCase_ : Optional[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname ,"dpr_tokenizer" )
os.makedirs(_snake_case ,exist_ok=_snake_case )
UpperCAmelCase_ : List[str] = os.path.join(_snake_case ,DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase_ : str = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ : Optional[int] = {"unk_token": "<unk>"}
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname ,"bart_tokenizer" )
os.makedirs(_snake_case ,exist_ok=_snake_case )
UpperCAmelCase_ : Any = os.path.join(_snake_case ,BART_VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Union[str, Any] = os.path.join(_snake_case ,BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
def UpperCamelCase__ ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"dpr_tokenizer" ) )
def UpperCamelCase__ ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"dpr_tokenizer" ) )
def UpperCamelCase__ ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"bart_tokenizer" ) )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" ,string_factory="Flat" ,metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.get_dummy_dataset()
UpperCAmelCase_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,)
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCAmelCase_ : List[Any] = dataset
UpperCAmelCase_ : Any = RagRetriever(
_snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
return retriever
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_dataset()
UpperCAmelCase_ : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="custom" ,)
if from_disk:
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"dataset" )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,"index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname ,"index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname ,"dataset" ) )
del dataset
UpperCAmelCase_ : List[Any] = RagRetriever(
_snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
else:
UpperCAmelCase_ : int = RagRetriever(
_snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,_snake_case ) ,)
return retriever
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" ,string_factory="Flat" ,metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,"hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" ,index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] ,open(index_file_name + ".index_meta.dpr" ,"wb" ) )
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,"psgs_w100.tsv.pkl" )
UpperCAmelCase_ : Optional[Any] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(_snake_case ,open(_snake_case ,"wb" ) )
UpperCAmelCase_ : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="legacy" ,index_path=self.tmpdirname ,)
UpperCAmelCase_ : Optional[Any] = RagRetriever(
_snake_case ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = retriever.retrieve(_snake_case ,n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case )
self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_dataset()
retriever.save_pretrained(_snake_case )
UpperCAmelCase_ : Optional[Any] = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
UpperCAmelCase_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = retriever.retrieve(_snake_case ,n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case )
self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
UpperCAmelCase_ : int = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : List[Any] = retriever.retrieve(_snake_case ,n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
UpperCAmelCase_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) ,_snake_case )
self.assertEqual(doc_dicts[0]["id"][0] ,"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] ,"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
UpperCAmelCase_ : str = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : Optional[int] = retriever.retrieve(_snake_case ,n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : List[str] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = retriever.retrieve(_snake_case ,n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) ,_snake_case )
self.assertEqual(doc_dicts[0]["text"][0] ,"bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] ,"foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
UpperCAmelCase_ : Tuple = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : Dict = retriever.retrieve(_snake_case ,n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
import torch
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase_ : Tuple = [[5, 7], [10, 11]]
UpperCAmelCase_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : Optional[int] = retriever(_snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertIsInstance(_snake_case ,np.ndarray )
UpperCAmelCase_ : Optional[Any] = retriever(
_snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_snake_case ,torch.Tensor )
self.assertIsInstance(_snake_case ,torch.Tensor )
self.assertIsInstance(_snake_case ,torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
retriever.set_ctx_encoder_tokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = [[5, 7], [10, 11]]
UpperCAmelCase_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
UpperCAmelCase_ : Optional[int] = retriever(_snake_case ,_snake_case ,prefix=retriever.config.generator.prefix ,n_docs=_snake_case )
        self.assertEqual(
            len(_snake_case ) ,6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) ,_snake_case ) # check for the doc-token-related keys in the dictionary
| 67 | 1 |
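A minimal sketch of the retrieval call exercised throughout these tests, assuming a built `RagRetriever` instance (`retriever`) and the fixture's `retrieval_vector_size`; the variable names below are illustrative, not taken from the snippet:

import numpy as np

# Two query vectors, all-ones and all-minus-ones, as in the tests above.
hidden_states = np.array(
    [np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
assert retrieved_doc_embeds.shape == (2, 1, retrieval_vector_size)
assert doc_ids.tolist() == [[1], [0]]  # inner product favors doc "1" for the all-ones query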
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # mark the end of a complete word
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 |
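Two quick checks for the `word_break` routine above, mirroring the classic examples for this problem:

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False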
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    '''Capitalize the first character of the sentence, leaving the rest unchanged.'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod() | 206 | 0 |
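For reference, the lookup-table version above uppercases only the first character and leaves the rest untouched (unlike `str.capitalize`, which also lowercases the remainder):

assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello") == "123 hello"  # non-letters fall through via the .get() default
assert capitalize("") == ""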
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """simple docstring"""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for the greatest element encountered in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as consumed so duplicates resolve to the next item
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the full profit for this item: the taken fraction is
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take the
            # required number of remaining kgs and calculate the profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
lowerCAmelCase__ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
lowerCAmelCase__ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
lowerCAmelCase__ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 244 |
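A worked instance for the greedy routine above: with profits (60, 100, 120), weights (10, 20, 30) and a 50 kg limit, the two best profit/weight items are taken whole and two thirds of the last one fractionally, for a total of 60 + 100 + 80 = 240:

assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0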
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """simple docstring"""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for the greatest element encountered in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as consumed so duplicates resolve to the next item
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the full profit for this item: the taken fraction is
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take the
            # required number of remaining kgs and calculate the profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
lowerCAmelCase__ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
lowerCAmelCase__ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
lowerCAmelCase__ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 244 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class snake_case_ (UpperCamelCase__ ):
UpperCAmelCase__ : jnp.ndarray
UpperCAmelCase__ : jnp.ndarray
class snake_case_ (nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
a__ = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(UpperCamelCase_ )
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(UpperCamelCase_ )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self :Optional[int] ,__snake_case :Optional[int] ) -> List[Any]:
a__ = self.conv_in(UpperCamelCase_ )
a__ = nn.silu(UpperCamelCase_ )
for block in self.blocks:
a__ = block(UpperCamelCase_ )
a__ = nn.silu(UpperCamelCase_ )
a__ = self.conv_out(UpperCamelCase_ )
return embedding
@flax_register_to_config
class snake_case_ (nn.Module , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : int = 3_2
UpperCAmelCase__ : int = 4
UpperCAmelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase__ : Union[bool, Tuple[bool]] = False
UpperCAmelCase__ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : Union[int, Tuple[int]] = 8
UpperCAmelCase__ : Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase__ : int = 1_2_8_0
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = "rgb"
UpperCAmelCase__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def lowerCamelCase__( self :List[Any] ,__snake_case :jax.random.KeyArray ) -> FrozenDict:
a__ = (1, self.in_channels, self.sample_size, self.sample_size)
a__ = jnp.zeros(UpperCamelCase_ ,dtype=jnp.floataa )
a__ = jnp.ones((1,) ,dtype=jnp.intaa )
a__ = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
a__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
a__ = jnp.zeros(UpperCamelCase_ ,dtype=jnp.floataa )
a__ , a__ = jax.random.split(UpperCamelCase_ )
a__ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )["params"]
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
a__ = self.block_out_channels
a__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ = self.num_attention_heads or self.attention_head_dim
# input
a__ = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
a__ = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
a__ = FlaxTimestepEmbedding(UpperCamelCase_ ,dtype=self.dtype )
a__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
a__ = self.only_cross_attention
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
a__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
a__ = (num_attention_heads,) * len(self.down_block_types )
# down
a__ = []
a__ = []
a__ = block_out_channels[0]
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(UpperCamelCase_ )
for i, down_block_type in enumerate(self.down_block_types ):
a__ = output_channel
a__ = block_out_channels[i]
a__ = i == len(UpperCamelCase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCamelCase_ ,out_channels=UpperCamelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
a__ = FlaxDownBlockaD(
in_channels=UpperCamelCase_ ,out_channels=UpperCamelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(UpperCamelCase_ )
for _ in range(self.layers_per_block ):
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(UpperCamelCase_ )
if not is_final_block:
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(UpperCamelCase_ )
a__ = down_blocks
a__ = controlnet_down_blocks
# mid
a__ = block_out_channels[-1]
a__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCamelCase_ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
a__ = nn.Conv(
UpperCamelCase_ ,kernel_size=(1, 1) ,padding='VALID' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self :Dict ,__snake_case :Union[str, Any] ,__snake_case :Any ,__snake_case :Optional[Any] ,__snake_case :List[Any] ,__snake_case :float = 1.0 ,__snake_case :bool = True ,__snake_case :bool = False ,) -> Union[FlaxControlNetOutput, Tuple]:
a__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
a__ = jnp.flip(UpperCamelCase_ ,axis=1 )
# 1. time
if not isinstance(UpperCamelCase_ ,jnp.ndarray ):
a__ = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(UpperCamelCase_ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
a__ = timesteps.astype(dtype=jnp.floataa )
a__ = jnp.expand_dims(UpperCamelCase_ ,0 )
a__ = self.time_proj(UpperCamelCase_ )
a__ = self.time_embedding(UpperCamelCase_ )
# 2. pre-process
a__ = jnp.transpose(UpperCamelCase_ ,(0, 2, 3, 1) )
a__ = self.conv_in(UpperCamelCase_ )
a__ = jnp.transpose(UpperCamelCase_ ,(0, 2, 3, 1) )
a__ = self.controlnet_cond_embedding(UpperCamelCase_ )
sample += controlnet_cond
# 3. down
a__ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
a__ , a__ = down_block(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,deterministic=not train )
else:
a__ , a__ = down_block(UpperCamelCase_ ,UpperCamelCase_ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
a__ = self.mid_block(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,deterministic=not train )
        # 5. controlnet blocks
a__ = ()
for down_block_res_sample, controlnet_block in zip(UpperCamelCase_ ,self.controlnet_down_blocks ):
a__ = controlnet_block(UpperCamelCase_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
a__ = controlnet_down_block_res_samples
a__ = self.controlnet_mid_block(UpperCamelCase_ )
# 6. scaling
a__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCamelCase_ ,mid_block_res_sample=UpperCamelCase_ )
| 240 |
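A minimal initialization sketch for the controlnet defined above, on the assumption that it mirrors diffusers' `FlaxControlNetModel` (the class and method names in the snippet are mangled, so `FlaxControlNetModel` and `init_weights` below are stated on that assumption, not taken from the snippet):

import jax

controlnet = FlaxControlNetModel()  # hypothetical alias for the @flax_register_to_config class above
params = controlnet.init_weights(jax.random.PRNGKey(0))  # returns a FrozenDict of parameters, as in the init method above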
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Any=13 , UpperCamelCase_: Optional[Any]=7 , UpperCamelCase_: Optional[Any]=6 , UpperCamelCase_: Any=17 , UpperCamelCase_: str=23 , UpperCamelCase_: List[Any]=11 , UpperCamelCase_: Optional[int]=True , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = act_dim
lowercase__ = state_dim
lowercase__ = hidden_size
lowercase__ = max_length
lowercase__ = is_training
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
lowercase__ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , ) -> Dict:
"""simple docstring"""
lowercase__ = DecisionTransformerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''states''': states,
            '''actions''': actions,
            '''rewards''': rewards,
            '''returns_to_go''': returns_to_go,
            '''timesteps''': timesteps,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = (DecisionTransformerModel,) if is_torch_available() else ()
_lowercase : List[str] = ()
_lowercase : List[Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_lowercase : Any = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_lowercase : Tuple = False
_lowercase : str = False
_lowercase : Tuple = False
_lowercase : Optional[Any] = False
_lowercase : Tuple = False
_lowercase : Dict = False
_lowercase : Tuple = False
_lowercase : Optional[Any] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = DecisionTransformerModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = DecisionTransformerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(UpperCamelCase_ )] , UpperCamelCase_ )
@require_torch
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = 2 # number of steps of autoregressive prediction we will perform
lowercase__ = 10 # defined by the RL environment, may be normalized
lowercase__ = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase__ = model.to(UpperCamelCase_ )
lowercase__ = model.config
torch.manual_seed(0 )
lowercase__ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ) # env.reset()
lowercase__ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCamelCase_ )
lowercase__ = torch.tensor(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase__ = state
lowercase__ = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase_ , dtype=torch.floataa )
lowercase__ = torch.zeros(1 , 0 , device=UpperCamelCase_ , dtype=torch.floataa )
lowercase__ = torch.tensor(0 , device=UpperCamelCase_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCamelCase_ ):
lowercase__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase_ )] , dim=1 )
lowercase__ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase_ )] , dim=1 )
lowercase__ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase__ , lowercase__ , lowercase__ = model(
states=UpperCamelCase_ , actions=UpperCamelCase_ , rewards=UpperCamelCase_ , returns_to_go=UpperCamelCase_ , timesteps=UpperCamelCase_ , attention_mask=UpperCamelCase_ , return_dict=UpperCamelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase__ = action_pred[0, -1]
lowercase__ = torch.cat([states, state] , dim=1 )
lowercase__ = returns_to_go[0, -1] - reward
lowercase__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase__ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 110 | 0 |
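The autoregressive loop in the integration test reduces to a batched forward pass. A minimal sketch using a freshly initialized model (shapes follow the model tester's assertions above; the config values are illustrative):

import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6)
model = DecisionTransformerModel(config).eval()
batch, seq = 1, 4
out = model(
    states=torch.randn(batch, seq, config.state_dim),
    actions=torch.randn(batch, seq, config.act_dim),
    rewards=torch.zeros(batch, seq, 1),
    returns_to_go=torch.randn(batch, seq, 1),
    timesteps=torch.arange(seq).view(1, seq),
    attention_mask=torch.ones(batch, seq, dtype=torch.long),
)
assert out.action_preds.shape == (batch, seq, config.act_dim)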
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
class _a ( lowercase_ ):
def __init__(self, SCREAMING_SNAKE_CASE_ ) -> Dict:
super().__init__()
UpperCAmelCase_: Optional[Any] = nn.ModuleList(a__ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = True, ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(a__, a__, self.nets ) ):
UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = controlnet(
a__, a__, a__, a__, a__, a__, a__, a__, a__, a__, a__, )
# merge samples
if i == 0:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = down_samples, mid_sample
else:
UpperCAmelCase_: str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a__, a__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, ) -> List[str]:
UpperCAmelCase_: str = 0
UpperCAmelCase_: Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a__, is_main_process=a__, save_function=a__, safe_serialization=a__, variant=a__, )
idx += 1
UpperCAmelCase_: Tuple = model_path_to_save + f'_{idx}'
@classmethod
def __snake_case (cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = 0
UpperCAmelCase_: Dict = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCAmelCase_: Union[str, Any] = pretrained_model_path
while os.path.isdir(a__ ):
UpperCAmelCase_: Any = ControlNetModel.from_pretrained(a__, **a__ )
controlnets.append(a__ )
idx += 1
UpperCAmelCase_: Optional[int] = pretrained_model_path + f'_{idx}'
logger.info(f'{len(a__ )} controlnets loaded from {pretrained_model_path}.' )
if len(a__ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(a__ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(a__ )
| 353 |
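A short usage sketch, assuming the wrapper above corresponds to diffusers' `MultiControlNetModel`; the save layout matches the `controlnet`, `controlnet_1`, ... scheme that the `from_pretrained` loop above walks:

from diffusers import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel  # import path assumed

nets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
]
multi = MultiControlNetModel(nets)
multi.save_pretrained("./multi_controlnet")  # first net at ./multi_controlnet, second at ./multi_controlnet_1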
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( _lowerCAmelCase ):
A = 42
A = None
def lowerCAmelCase_ (lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[int]=0.999 , lowerCAmelCase__: List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__: List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__: str ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
UpperCAmelCase_: List[Any] = []
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_: Optional[int] = i / num_diffusion_timesteps
UpperCAmelCase_: int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
class _a ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__(self, SCREAMING_SNAKE_CASE_ = 1000, SCREAMING_SNAKE_CASE_ = "fixed_small_log", SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1.0, SCREAMING_SNAKE_CASE_ = "epsilon", SCREAMING_SNAKE_CASE_ = "squaredcos_cap_v2", ) -> List[Any]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
UpperCAmelCase_: Tuple = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = 1.0 - self.betas
UpperCAmelCase_: int = torch.cumprod(self.alphas, dim=0 )
UpperCAmelCase_: Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCAmelCase_: List[str] = 1.0
# setable values
UpperCAmelCase_: str = None
UpperCAmelCase_: str = torch.from_numpy(np.arange(0, SCREAMING_SNAKE_CASE_ )[::-1].copy() )
UpperCAmelCase_: Dict = variance_type
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> torch.FloatTensor:
return sample
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = num_inference_steps
UpperCAmelCase_: Tuple = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCAmelCase_: Tuple = (np.arange(0, SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCAmelCase_: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
if prev_timestep is None:
UpperCAmelCase_: Any = t - 1
UpperCAmelCase_: int = self.alphas_cumprod[t]
UpperCAmelCase_: Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase_: int = 1 - alpha_prod_t
UpperCAmelCase_: List[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase_: List[str] = self.betas[t]
else:
UpperCAmelCase_: List[str] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase_: Tuple = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCAmelCase_: List[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase_: str = torch.log(torch.clamp(SCREAMING_SNAKE_CASE_, min=1E-20 ) )
UpperCAmelCase_: Dict = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase_: Dict = variance.log()
UpperCAmelCase_: Tuple = beta.log()
UpperCAmelCase_: int = (predicted_variance + 1) / 2
UpperCAmelCase_: int = frac * max_log + (1 - frac) * min_log
return variance
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_ = True, ) -> Union[UnCLIPSchedulerOutput, Tuple]:
UpperCAmelCase_: List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase_ , UpperCAmelCase_: List[str] = torch.split(SCREAMING_SNAKE_CASE_, sample.shape[1], dim=1 )
else:
UpperCAmelCase_: Union[str, Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase_: List[Any] = t - 1
UpperCAmelCase_: Optional[int] = self.alphas_cumprod[t]
UpperCAmelCase_: Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase_: Optional[Any] = 1 - alpha_prod_t
UpperCAmelCase_: Optional[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase_: Tuple = self.betas[t]
UpperCAmelCase_: Dict = self.alphas[t]
else:
UpperCAmelCase_: List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase_: List[str] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase_: Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase_: int = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase_: Optional[int] = torch.clamp(
SCREAMING_SNAKE_CASE_, -self.config.clip_sample_range, self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_: Optional[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase_: Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_: List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase_: Union[str, Any] = 0
if t > 0:
UpperCAmelCase_: Any = randn_tensor(
model_output.shape, dtype=model_output.dtype, generator=SCREAMING_SNAKE_CASE_, device=model_output.device )
UpperCAmelCase_: Dict = self._get_variance(
SCREAMING_SNAKE_CASE_, predicted_variance=SCREAMING_SNAKE_CASE_, prev_timestep=SCREAMING_SNAKE_CASE_, )
if self.variance_type == "fixed_small_log":
UpperCAmelCase_: Optional[int] = variance
elif self.variance_type == "learned_range":
UpperCAmelCase_: Dict = (0.5 * variance).exp()
else:
raise ValueError(
f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
""" for the UnCLIPScheduler.""" )
UpperCAmelCase_: int = variance * variance_noise
UpperCAmelCase_: List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_, pred_original_sample=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
UpperCAmelCase_: Tuple = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype )
UpperCAmelCase_: Union[str, Any] = timesteps.to(original_samples.device )
UpperCAmelCase_: Dict = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase_: int = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase_: str = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase_: Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase_: Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase_: Optional[int] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase_: List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 82 | 0 |
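For reference, the coefficients assembled in the `step` method above are the DDPM posterior quantities the comments cite (formulas (6) and (7) of Ho et al.). With $\alpha_t$ the per-step alpha and $\bar{\alpha}_t$ its cumulative product:

$$
\tilde{\mu}_t(x_t, x_0) = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1-\bar{\alpha}_t}\,x_0 + \frac{\sqrt{\alpha_t}\,(1-\bar{\alpha}_{t-1})}{1-\bar{\alpha}_t}\,x_t, \qquad \tilde{\beta}_t = \frac{1-\bar{\alpha}_{t-1}}{1-\bar{\alpha}_t}\,\beta_t
$$

which map onto `pred_original_sample_coeff`, `current_sample_coeff`, and the computed `variance` respectively.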
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
a_ = pd.read_csv("""sample_data.csv""", header=None)
a_ = df.shape[:1][0]
    # If you're using some other dataset, input the target column here
a_ = df.iloc[:, 1:2]
a_ = actual_data.values.reshape(len_data, 1)
a_ = MinMaxScaler().fit_transform(actual_data)
a_ = 10
a_ = 5
a_ = 20
a_ = len_data - periods * look_back
a_ = actual_data[:division]
a_ = actual_data[division - look_back :]
a_ , a_ = [], []
a_ , a_ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
a_ = np.array(train_x)
a_ = np.array(test_x)
a_ = np.array([list(i.ravel()) for i in train_y])
a_ = np.array([list(i.ravel()) for i in test_y])
a_ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
a_ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
a_ = model.predict(x_test)
| 330 |
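A quick shape check for the sliding-window construction above: with `look_back = 10` and `forward_days = 5`, a length-100 series yields 86 windows of 10 inputs and 5 targets each (the values here are synthetic, not from `sample_data.csv`):

import numpy as np

data = np.arange(100, dtype=float).reshape(-1, 1)
look_back, forward_days = 10, 5
n = len(data) - forward_days - look_back + 1
x = np.array([data[i : i + look_back] for i in range(n)])
y = np.array([data[i + look_back : i + look_back + forward_days] for i in range(n)])
assert x.shape == (86, 10, 1) and y.shape == (86, 5, 1)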
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_config()
__lowerCamelCase = 300
return config
def lowerCamelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_no_head ( self ):
        '''simple docstring'''
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm ( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input ( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
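# A hedged sketch of the integration-test pattern used above (checkpoint name comes
# from the tests themselves; the shapes mirror test_inference_no_head):
#
#     model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#     input_ids = torch.arange(256).unsqueeze(0)
#     with torch.no_grad():
#         last_hidden_state = model(input_ids)[0]
#     assert last_hidden_state.shape == torch.Size((1, 256, 768))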
| 330 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
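# Design note: the try/except guard above mirrors the standard diffusers pattern --
# when torch or transformers is missing, dummy placeholder objects are imported so the
# public names still resolve and only raise a helpful error when actually instantiated.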
| 315 |
import math
def is_prime ( number : int ):
    """simple docstring"""
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime ( value , factor = 1 , **kwargs ):
    """simple docstring"""
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
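# A minimal usage sketch (the expected values follow directly from the definitions
# above; nothing here is taken from an external API):
#
#     assert is_prime(13) and not is_prime(21)
#     assert next_prime(14) == 17           # first prime reached climbing from 14
#     assert next_prime(5, factor=2) == 11  # search starts from factor * value = 10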
| 315 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
_snake_case = TypeVar("U")
class DoubleLinkedListNode ( Generic[T, U]):
    def __init__( self, key, val):
        '''simple docstring'''
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
'''simple docstring'''
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
)
class DoubleLinkedList ( Generic[T, U]):
    def __init__( self):
        '''simple docstring'''
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ["DoubleLinkedList"]
_lowerCAmelCase : str = self.head
while node.next is not None:
rep.append(str(__a))
_lowerCAmelCase : Tuple = node.next
rep.append(str(self.rear))
return ",\n ".join(__a)
    def add( self, __a):
        '''simple docstring'''
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = __a
        __a.prev = previous
        self.rear.prev = __a
        __a.next = self.rear
    def remove( self, __a):
        '''simple docstring'''
        if __a.prev is None or __a.next is None:
            return None
        __a.prev.next = __a.next
        __a.next.prev = __a.prev
        __a.prev = None
        __a.next = None
        return __a
class LRUCache ( Generic[T, U]):
    decorator_function_to_instance_map: dict = {}
def __init__( self, __a):
'''simple docstring'''
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = __a
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
'''simple docstring'''
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self, __a):
'''simple docstring'''
        return __a in self.cache
    def get( self, __a):
        '''simple docstring'''
        if __a in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[__a]
            node = self.list.remove(self.cache[__a])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
self.miss += 1
return None
    def put( self, key, value):
        '''simple docstring'''
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
@classmethod
    def decorator( cls, size = 128):
        '''simple docstring'''
        def cache_decorator_inner(func) -> Callable[..., U]:
            def cache_decorator_wrapper(*args) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
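# A minimal, hedged usage sketch of the decorator above (`decorator` and `cache_info`
# are the names fixed up above; the function and capacity are illustrative):
#
#     @LRUCache.decorator(100)
#     def fib(n: int) -> int:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)
#     print(fib.cache_info())  # e.g. CacheInfo(hits=28, misses=31, capacity=100, current size=31)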
| 36 |
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n )
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
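# A hedged invocation sketch (the data file layout follows generate_neighbours above:
# one "node_a node_b distance" triple per line; the path and values are illustrative):
#
#     python tabu_search.py -f tabu_test_data.txt -i 4 -s 3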
| 36 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp ( self ) -> List[str]:
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens ,save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP ,save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self ,**__UpperCAmelCase ) -> Any:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
    def get_input_output_texts ( self ,__UpperCAmelCase ) -> Optional[int]:
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id ( self ) -> List[Any]:
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab ( self ) -> Optional[int]:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""</s>""" )
        self.assertEqual(vocab_keys[1] ,"""<unk>""" )
        self.assertEqual(vocab_keys[-1] ,"""<s>""" )
        self.assertEqual(len(vocab_keys ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def UpperCAmelCase_ ( self ) -> int:
pass
    def test_full_tokenizer ( self ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[2, 3, 4, 5, 6] ,)
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        text = tokenizer.convert_tokens_to_string(back_tokens )
        self.assertEqual(text ,"""This is a test""" )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
# fmt: off
lowerCAmelCase__ : Dict = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase ,model_name="""facebook/m2m100_418M""" ,revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass ( cls ) -> Any:
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name ,src_lang="""en""" ,tgt_lang="""fr""" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) ,12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) ,12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) ,12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) ,12_8063 )
    def test_get_vocab ( self ) -> str:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) ,self.tokenizer.vocab_size )
        self.assertEqual(vocab["""<unk>"""] ,3 )
        self.assertIn(self.tokenizer.get_lang_token("""en""" ) ,vocab )
    def test_tokenizer_batch_encode_plus ( self ) -> str:
        self.tokenizer.src_lang = """en"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens ,ids )
    def test_tokenizer_decode_ignores_language_codes ( self ) -> Tuple:
        self.assertIn(FR_CODE ,self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids ,skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=True )
        self.assertEqual(result ,expected_french )
        self.assertNotIn(self.tokenizer.eos_token ,result )
    def test_save_and_load_tokenizer ( self ) -> Tuple:
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id ,original_lang_token_to_id )
    @require_torch
    def test_batch_fairseq_parity ( self ) -> Optional[int]:
        self.tokenizer.src_lang = """en"""
        self.tokenizer.tgt_lang = """fr"""
        batch = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=True ,return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(
            batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter ( self ) -> Optional[int]:
        self.tokenizer.src_lang = """mr"""
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""mr""" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = """zh"""
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""zh""" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode ( self ) -> str:
        self.tokenizer.tgt_lang = """mr"""
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""mr""" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = """zh"""
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""zh""" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation ( self ) -> Tuple:
        inputs = self.tokenizer._build_translation_inputs("""A test""" ,return_tensors="""pt""" ,src_lang="""en""" ,tgt_lang="""ar""" )
        self.assertEqual(
            nested_simplify(inputs ) ,{
# en_XX, A, test, EOS
"""input_ids""": [[12_8022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 12_8006,
} ,)
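# A hedged end-to-end sketch of the translation setup exercised above (checkpoint and
# language codes come from the tests; everything else is illustrative):
#
#     tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tok(["A test"], return_tensors="pt")
#     # a seq2seq model would then be called with forced_bos_token_id=tok.get_lang_id("fr")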
| 184 |
'''simple docstring'''
import os
def largest_product( grid ):
    """simple docstring"""
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """simple docstring"""
    grid = []
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
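# Complexity note: for an n x n grid the nested loops above touch O(n^2) cells and do
# constant work per cell, so solution() runs in O(n^2) time with O(1) extra space.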
| 184 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _snake_case( ) -> str:
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_lowerCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_lowerCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_lowerCamelCase )
return parser.parse_args()
def _snake_case( ) -> Optional[Any]:
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
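# A hedged invocation sketch (script name and flags are illustrative; the launcher
# forwards everything after the training script to that script plus --tpu_num_cores):
#
#     python xla_spawn.py --num_cores 8 train.py --per_device_batch_size 16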
| 20 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config ( PretrainedConfig ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
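# A hedged instantiation sketch (names and defaults follow the definition above):
#
#     config = Swinv2Config(image_size=256, window_size=8)
#     assert config.hidden_size == int(96 * 2 ** 3)  # 768, channel dim after the last stage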
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
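# Design note: the _import_structure table above is consumed by _LazyModule, so the
# heavy torch/TF submodules are only imported on first attribute access; the
# TYPE_CHECKING branch keeps static type checkers and IDEs aware of the real symbols.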
| 364 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , )-> str:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self )-> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self )-> Dict:
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder ( self )-> Tuple:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> Optional[int]:
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )-> Tuple:
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> List[str]:
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> Tuple:
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> int:
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> Dict:
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> int:
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> Dict:
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels )-> Tuple:
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self )-> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False )-> Optional[Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self )-> Dict:
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config ( self )-> List[str]:
        self.config_tester.run_common_tests()
    def test_model ( self )-> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder ( self )-> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask ( self )-> List[Any]:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm ( self )-> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self )-> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction ( self )-> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining ( self )-> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering ( self )-> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self )-> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self )-> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained ( self )-> Union[str, Any]:
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change ( self )-> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , """bert.pt""" ) )
                loaded = torch.jit.load(os.path.join(tmp , """bert.pt""" ) , map_location=torch_device )
                loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase):
@slow
    def test_inference_nezha_model ( self )-> Dict:
        model = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_nezha_masked_lm ( self )-> Optional[Any]:
        model = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
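# A hedged sketch of the integration-check pattern above (checkpoint name comes from
# the tests; input values are illustrative):
#
#     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#     with torch.no_grad():
#         hidden = model(torch.tensor([[0, 1, 2]]))[0]
#     print(hidden.shape)  # expected: torch.Size([1, 3, 768])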
| 49 | 0 |
"""simple docstring"""
from manim import *
class _lowerCAmelCase ( Scene ):
"""simple docstring"""
    def construct ( self : Any ):
__lowercase = Rectangle(height=0.5, width=0.5 )
__lowercase = Rectangle(height=0.25, width=0.25 )
__lowercase = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("CPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("GPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Model", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
__lowercase = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=UpperCAmelCase__, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=UpperCAmelCase__, buff=0.0 )
self.add(UpperCAmelCase__ )
model_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Loaded Checkpoint", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = fill.copy().set_fill(UpperCAmelCase__, opacity=0.7 )
target.move_to(UpperCAmelCase__ )
ckpt_arr.append(UpperCAmelCase__ )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=1_8, )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=1_8, )
blue_text.next_to(UpperCAmelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase__ )
__lowercase = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=2_4, )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Disk", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ), Write(UpperCAmelCase__, run_time=1 ), Create(UpperCAmelCase__, run_time=1 ) )
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__, run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
__lowercase = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__, UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ ), )
self.wait()
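# Design note: this Scene animates checkpoint shards moving from a "Loaded Checkpoint"
# group onto CPU and disk slots, in the style of the accelerate big-model docs.
# A hedged render invocation (scene class name from this file, flags illustrative):
#
#     manim -pql this_file.py _lowerCAmelCase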
| 17 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        """simple docstring"""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
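
# To run only this module (the path assumes the usual transformers test layout):
#   python -m pytest tests/models/resnet/test_modeling_tf_resnet.py -rA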
| 39 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
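
# Illustrative note (not part of the original module): with this lazy layout,
# importing the package stays cheap and heavy backends load on first attribute
# access, e.g.
#   from transformers.models.wav2vec2 import Wav2Vec2Config  # no torch import yet
#   from transformers.models.wav2vec2 import Wav2Vec2Model   # triggers the torch import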
| 109 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    # primes[i] is True while i is still a prime candidate
    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # mark all multiples of p starting at p * p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
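
# Illustrative check (added for clarity, not part of the original script):
# prime_sieve_eratosthenes(10) returns [2, 3, 5, 7];
# prime_sieve_eratosthenes(1) returns [] since there is no prime <= 1.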
| 109 | 1 |
"""simple docstring"""
import os
def a__ ( SCREAMING_SNAKE_CASE : str = "matrix.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) as in_file:
lowerCAmelCase : Any = in_file.read()
lowerCAmelCase : Optional[Any] = [[int(SCREAMING_SNAKE_CASE ) for cell in row.split("," )] for row in data.strip().splitlines()]
lowerCAmelCase : Union[str, Any] = [[0 for cell in row] for row in grid]
lowerCAmelCase : Dict = len(grid[0] )
lowerCAmelCase : Tuple = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
lowerCAmelCase : str = grid[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = grid[i][0] + dp[i - 1][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(1 , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F"{solution() = }")
| 108 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : Dict ) -> str:
_UpperCAmelCase : Union[str, Any] = OmegaConf.load(_lowerCAmelCase )
_UpperCAmelCase : str = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""]
_UpperCAmelCase : Dict = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : List[str] = """first_stage_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : str = {}
_UpperCAmelCase : Tuple = """model.diffusion_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Tuple = state_dict[key]
_UpperCAmelCase : Optional[Any] = config.model.params.first_stage_config.params
_UpperCAmelCase : Optional[Any] = config.model.params.unet_config.params
_UpperCAmelCase : List[str] = VQModel(**_lowerCAmelCase ).eval()
vqvae.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : List[Any] = UNetLDMModel(**_lowerCAmelCase ).eval()
unet.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = DDIMScheduler(
timesteps=config.model.params.timesteps, beta_schedule="""scaled_linear""", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=_lowerCAmelCase, )
_UpperCAmelCase : Tuple = LDMPipeline(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
pipeline.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
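
# Example invocation (script name and paths are placeholders):
#   python convert_ldm_original.py \
#       --checkpoint_path ldm.ckpt --config_path ldm_config.yaml --output_path ./ldm-pipeline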
| 246 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]:
A__ = list(range(len(lowercase_ ) ) )
A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )
A__ = 0
A__ = [0] * len(lowercase_ )
for i in index:
if weight[i] <= capacity:
A__ = 1
max_value += value[i]
capacity -= weight[i]
else:
A__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
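
# Worked example (values assumed for illustration): with values [60, 100, 120],
# weights [10, 20, 30] and capacity 50, the greedy ratio order takes items 0 and 1
# whole and 2/3 of item 2, so
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# returns (240.0, [1, 1, 0.666...]).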
| 367 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( A_ ):
lowercase__ = ['''image_processor''', '''tokenizer''']
lowercase__ = '''AutoImageProcessor'''
lowercase__ = '''AutoTokenizer'''
def __init__( self : str , snake_case_ : Dict , snake_case_ : List[str] ) -> str:
'''simple docstring'''
super().__init__(snake_case_ , snake_case_ )
A__ = self.image_processor
def __call__( self : int , snake_case_ : Any=None , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def __magic_name__ ( self : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : List[Any] ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __magic_name__ ( self : List[str] , *snake_case_ : List[str] , **snake_case_ : Optional[int] ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 230 | 0 |
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_parallelizable_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
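
# Minimal inference sketch mirroring the integration tests above (checkpoint id
# taken from the tests; illustrative only):
#   model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#   output = model(torch.arange(256).unsqueeze(0))[0]  # shape (1, 256, 768)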
| 7 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, for which there
    # is no deterministic operator, so deterministic algorithms are disabled.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 7 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
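
# Illustrative sketch (not part of the original test file): a user-defined
# callback hooks into the same events recorded by MyTestTrainerCallback above.
class PrintEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        # `state.epoch` is a float tracking training progress
        print(f"finished epoch {state.epoch}")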
| 365 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
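
# Typical invocations once accelerate is installed (the config path is a placeholder):
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml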
| 250 | 0 |