import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
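# Smoke tests for the Flax example scripts: each test assembles a CLI-style
# argument list, patches sys.argv, runs the example's main(), and then checks
# the metrics the script wrote to <output_dir>/<split>_results.json. The
# example directories below are appended to sys.path so that the run_*.py
# scripts can be imported as plain modules.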
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
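# For illustration, a {split}_results.json written by these scripts is a flat
# metric dict roughly along these lines (keys vary per task; this sketch is an
# assumption based on the assertions below):
#
#     {"eval_accuracy": 0.83, "eval_loss": 0.42}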
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
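    # These tests are normally collected with pytest, e.g. (the path is an
    # assumption matching the usual transformers repository layout):
    #
    #     python -m pytest examples/flax/test_flax_examples.py -k glue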
# ---------------------------------------------------------------------------
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
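# Multi-Heuristic A* (MHA*) background: one "anchor" search is ordered by a
# consistent heuristic, and additional searches are ordered by possibly
# inadmissible heuristics. Every open list ranks a state s by
#
#     key(s, i) = g(s) + W1 * h_i(s, goal)
#
# and an inadmissible list i is only expanded from while its best key stays
# within a factor W2 of the anchor list's best key (see multi_a_star below).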
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos) -> float:
    # integer division by the time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos) -> int:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
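# The obstacle layout above is hard-coded for the 20x20 grid configured below;
# coordinates are (x, y) pairs, and the renderers in do_something() and
# multi_a_star() draw y increasing upwards.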
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
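# With W1 = W2 = 1 the inadmissible queues are kept on a tight leash: queue i
# is only popped while open_list[i].minkey() <= W2 * open_list[0].minkey(),
# otherwise the anchor queue expands instead. Raising W1/W2 trades path
# optimality for search speed.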
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
# ---------------------------------------------------------------------------
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
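# A minimal sketch of how HfArg is meant to be used; the dataclass and field
# names here are illustrative only, not part of this module:
#
#     @dataclasses.dataclass
#     class TrainingConfig:
#         learning_rate: float = HfArg(default=5e-5, help="Peak learning rate.")
#         output_dir: str = HfArg(aliases=["--out"], default="out", help="Save dir.")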
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
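if __name__ == "__main__":
    # Minimal, self-contained demo of HfArgumentParser. The dataclass below is
    # illustrative only and is not part of the transformers API.
    @dataclasses.dataclass
    class _DemoArgs:
        name: str = HfArg(default="world", help="Who to greet.")
        times: int = HfArg(default=1, help="How many greetings to print.")

    (_demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses()
    for _ in range(_demo_args.times):
        print(f"hello, {_demo_args.name}")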
# ---------------------------------------------------------------------------
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
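# Worked example: with the default scale_factor=8, a requested 768x768 image
# gives 768 // 8**2 = 12 with no remainder, so the function returns (96, 96),
# i.e. the MoVQ latent resolution that corresponds to 768 pixels at a total
# downscale factor of 8.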
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
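    # strength follows the usual img2img convention: with strength=0.3 and
    # num_inference_steps=100, the first 70 timesteps are skipped and only the
    # last 30 denoising steps run on top of the noised input image;
    # strength=1.0 would regenerate from (almost) pure noise.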
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
# ---------------------------------------------------------------------------
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the ๐ค hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
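# Example invocation (script name and paths are placeholders):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path generator_ckpt.pt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan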
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# ---------------------------------------------------------------------------
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
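The `_pad` override above extends `global_attention_mask` with `-1` on the padding side, since `0` already means local attention in LED's convention. A standalone sketch of just that rule (assumed helper name, mirroring the logic rather than calling the tokenizer):
# Sketch: pad a global attention mask to the length of the padded input_ids.
def pad_global_attention_mask(global_mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(global_mask)
    if difference <= 0:
        return global_mask
    if padding_side == "right":
        return global_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]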
| 673 | 0 |
'''simple docstring'''
from __future__ import annotations
a_ : Optional[Any] = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __lowerCAmelCase ( _UpperCamelCase : Matrix , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = digit
if sudoku(_UpperCamelCase ) is not None:
return grid
SCREAMING_SNAKE_CASE = 0
return None
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCamelCase , end=' ' )
print()
if __name__ == "__main__":
    # the solver mutates the grid in place; print the original grid first, then its solution
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
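The `(row - row % 3) + i` arithmetic in the safety check walks the 3x3 box containing a cell, starting from its top-left corner. A tiny sketch of that corner mapping (assumed helper name):
# Sketch: top-left corner of the 3x3 box that contains (row, column).
def box_corner(row: int, column: int) -> tuple:
    return row - row % 3, column - column % 3

assert box_corner(4, 7) == (3, 6)  # cell (4, 7) lives in the box starting at (3, 6)
assert box_corner(0, 2) == (0, 0)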
| 701 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
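For a quick smoke test of the same collectives without GPUs or a launcher, a single-process run with the CPU-friendly `gloo` backend is enough (a minimal sketch; the address and port are placeholder values, not part of the script above):
# Minimal single-process sanity check with the gloo backend.
import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)  # trivial with world_size=1, but exercises the path
dist.barrier()
assert t.item() == 1.0
dist.destroy_process_group()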
| 673 | 0 |
from __future__ import annotations
from cmath import sqrt
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> tuple[complex, complex]:
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
SCREAMING_SNAKE_CASE = b * b - 4 * a * c
SCREAMING_SNAKE_CASE = (-b + sqrt(_UpperCamelCase )) / (2 * a)
SCREAMING_SNAKE_CASE = (-b - sqrt(_UpperCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
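A quick sanity check for the solver is to substitute each root back into the polynomial and confirm the residual is ~0 (a minimal sketch reusing the coefficients from `main`):
# Sketch: verify that a root r of ax^2 + bx + c satisfies |a*r^2 + b*r + c| ~ 0.
from cmath import sqrt

a, b, c = 5, 6, 1
disc = sqrt(b * b - 4 * a * c)
for r in ((-b + disc) / (2 * a), (-b - disc) / (2 * a)):
    assert abs(a * r * r + b * r + c) < 1e-9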
| 702 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
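The helpers above follow the usual `<s> A </s>` and `<s> A </s> B </s>` layouts. A standalone sketch of the special-tokens mask they produce for toy sequence lengths (assumed helper name, hand-computed rather than calling the tokenizer):
# Sketch: special-tokens mask for sequences of length 3 and 2
# (1 marks a special token, 0 marks a regular token).
def special_tokens_mask(len_a: int, len_b: int = 0) -> list:
    if len_b == 0:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]

assert special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert special_tokens_mask(3, 2) == [1, 0, 0, 0, 1, 0, 0, 1]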
| 673 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE = downstream_dict['objective.W']
return model
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase , map_location='cpu' )
SCREAMING_SNAKE_CASE = checkpoint['Downstream']
SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
SCREAMING_SNAKE_CASE = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
SCREAMING_SNAKE_CASE = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForXVector' ):
SCREAMING_SNAKE_CASE = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a_ : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 703 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
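For comparison, a standalone digit-sum variant built on `divmod` (an assumed helper name, separate from the three functions benchmarked above):
# Sketch: digit sum via divmod, peeling one digit per iteration.
def digit_sum_divmod(n: int) -> int:
    n = abs(n)
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit
    return total

assert digit_sum_divmod(262_144) == 19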
| 673 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
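A minimal sketch of what a helper like `deprecate` can do under the hood (assumed behavior; the real diffusers implementation also handles version gating and deprecated arguments):
# Sketch: emit a FutureWarning that points at the caller's caller via stacklevel.
import warnings

def simple_deprecate(name: str, removed_in: str, message: str, stacklevel: int = 3) -> None:
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removed_in}. {message}",
        FutureWarning,
        stacklevel=stacklevel,
    )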
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : int = 2_00_00_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 0
for i in range(_UpperCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
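A compact standalone sieve is handy for cross-checking small inputs, e.g. the sum of primes below 10 is 2 + 3 + 5 + 7 = 17 (a minimal sketch, independent of the function above):
# Sketch: boolean sieve of Eratosthenes, summing primes strictly below n.
def sum_primes_below(n: int) -> int:
    is_prime = [True] * n
    is_prime[0:2] = [False, False]
    for i in range(2, int(n**0.5) + 1):
        if is_prime[i]:
            is_prime[i * i :: i] = [False] * len(is_prime[i * i :: i])
    return sum(i for i, p in enumerate(is_prime) if p)

assert sum_primes_below(10) == 17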
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
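The slow tests above compare a fixed slice of the generated waveform against stored reference values. The comparison pattern itself, in isolation (a minimal numpy sketch with made-up numbers):
# Sketch: max-absolute-difference check between a generated slice and a reference slice.
import numpy as np

generated = np.array([-0.489, -0.460, 0.002, 0.501, 0.590])
reference = np.array([-0.4884, -0.4607, 0.0023, 0.5007, 0.5896])
assert np.abs(reference - generated).max() < 1e-2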
| 673 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=7 , snake_case__ : List[Any]=3 , snake_case__ : str=1_8 , snake_case__ : Dict=3_0 , snake_case__ : Tuple=4_0_0 , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=None , snake_case__ : int=True , snake_case__ : List[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 2_0}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =MobileNetVaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileNetVaImageProcessingTester(self )
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case__ , 'size' ) )
self.assertTrue(hasattr(snake_case__ , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case__ , 'crop_size' ) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
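The shape assertions above follow from resizing the shortest edge to 20 and center-cropping to 18x18. A standalone sketch of that pipeline with PIL (sizes assumed to match the tester defaults):
# Sketch: shortest-edge resize followed by a center crop, checked on one RGB image.
import numpy as np
from PIL import Image

image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))  # height 30, width 40
shortest_edge, crop = 20, 18
scale = shortest_edge / min(image.size)  # PIL size is (width, height)
resized = image.resize((round(image.width * scale), round(image.height * scale)))
left = (resized.width - crop) // 2
top = (resized.height - crop) // 2
cropped = resized.crop((left, top, left + crop, top + crop))
assert cropped.size == (crop, crop)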
| 706 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if num < 0:
return False
SCREAMING_SNAKE_CASE = num
SCREAMING_SNAKE_CASE = 0
while num > 0:
SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
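# Intended behaviour, illustrated: 121 reverses to 121 (True); 123 reverses to 321
# (False); negative inputs return False immediately.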
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
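# Hedged usage sketch (instantiation details assumed): calling the tool is expected to
# chain encode -> forward -> decode on a raw waveform, e.g.
#   transcriber = UpperCamelCase()
#   text = transcriber(audio)  # audio: 1-D float waveform, ideally 16 kHz for Whisper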
| 673 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can converge faster but uses more memory.
a_ : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
a_ : int = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
a_ : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len([g for position, g in enumerate(_UpperCamelCase ) if g == main_target[position]] )
return (item, float(_UpperCamelCase ))
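# Intended fitness, illustrated (argument order assumed from the call sites, i.e.
# evaluate(item, main_target)): evaluate("abd", "abc") would yield ("abd", 2.0),
# since two character positions match the target.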
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = random.randint(0 , len(_UpperCamelCase ) - 1 )
SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
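# Intended single-point crossover, illustrated: with parents "ABCDE" and "vwxyz"
# and a random slice of 2, the children would be "ABxyz" and "vwCDE".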
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE = random.choice(_UpperCamelCase )
return "".join(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : tuple[str, float] , _UpperCamelCase : list[tuple[str, float]] , _UpperCamelCase : list[str] , ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE = 10 if child_n >= 10 else child_n
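    # Worked numbers: a normalized fitness of 0.35 gives int(0.35 * 100) + 1 = 36,
    # which the line above then caps at 10 children.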
for _ in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = population_score[random.randint(0 , _UpperCamelCase )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = crossover(parent_a[0] , _UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
return pop
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] , _UpperCamelCase : bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_UpperCamelCase )
    # Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_UpperCamelCase )
# Generate random starting population.
SCREAMING_SNAKE_CASE = []
for _ in range(_UpperCamelCase ):
population.append(''.join([random.choice(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE = [evaluate(_UpperCamelCase , _UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : _UpperCamelCase[1] , reverse=_UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
SCREAMING_SNAKE_CASE = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCamelCase )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE = [
(item, score / len(_UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCamelCase ):
population.extend(select(population_score[int(_UpperCamelCase )] , _UpperCamelCase , _UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
        # break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
a_ : Union[str, Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
a_ : Optional[int] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'รจรฉรฒร โฌรน=)(&%$ยฃ/\\"
)
    a_ : List[str] = basic(target_str, genes_list)
    print(
        F"""\nGeneration: {a_[0]}\nTotal Population: {a_[1]}\nTarget: {a_[2]}"""
    )
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , snake_case__ : Any="" , snake_case__ : Tuple="train" ):
"""simple docstring"""
assert os.path.isdir(snake_case__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = os.listdir(snake_case__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , snake_case__ )
if not os.path.isfile(snake_case__ ):
continue
self.documents.append(snake_case__ )
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.documents )
def __getitem__( self : int , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.documents[idx]
SCREAMING_SNAKE_CASE = document_path.split('/' )[-1]
with open(snake_case__ , encoding='utf-8' ) as source:
SCREAMING_SNAKE_CASE = source.read()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = process_story(snake_case__ )
return document_name, story_lines, summary_lines
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : len(_UpperCamelCase ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
SCREAMING_SNAKE_CASE = [_add_missing_period(_UpperCamelCase ) for line in nonempty_lines]
# gather article lines
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = deque(_UpperCamelCase )
while True:
try:
SCREAMING_SNAKE_CASE = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : not t.startswith('@highlight' ) , _UpperCamelCase ) )
return story_lines, summary_lines
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
    SCREAMING_SNAKE_CASE = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
if len(_UpperCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCamelCase )) )
return sequence
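# e.g. with block_size=5 and pad_token_id=0: [1, 2, 3] -> [1, 2, 3, 0, 0], while
# [1, 2, 3, 4, 5, 6] is truncated to [1, 2, 3, 4, 5].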
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.ones_like(_UpperCamelCase )
SCREAMING_SNAKE_CASE = sequence == pad_token_id
SCREAMING_SNAKE_CASE = 0
return mask
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [tokenizer.encode(_UpperCamelCase ) for line in story_lines]
SCREAMING_SNAKE_CASE = [token for sentence in story_lines_token_ids for token in sentence]
SCREAMING_SNAKE_CASE = [tokenizer.encode(_UpperCamelCase ) for line in summary_lines]
SCREAMING_SNAKE_CASE = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for sequence in batch:
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCamelCase )
return torch.tensor(_UpperCamelCase )
| 709 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
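# SiLU (a.k.a. swish): silu(x) = x * sigmoid(x) = x / (1 + e^(-x));
# e.g. sigmoid(0.0) = 0.5, so silu(0.0) = 0.0.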
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
a_ : Union[str, Any] = "Tobias Carryer"
from time import time
class UpperCamelCase :
def __init__( self : Optional[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int]=int(time() ) ): # noqa: B008
"""simple docstring"""
SCREAMING_SNAKE_CASE = multiplier
SCREAMING_SNAKE_CASE = increment
SCREAMING_SNAKE_CASE = modulo
SCREAMING_SNAKE_CASE = seed
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
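# The update above is the classic linear congruential recurrence
# X_{n+1} = (a * X_n + c) mod m; the demo below uses the Numerical Recipes
# constants a=1664525, c=1013904223 with modulus 2 << 31 == 2**32.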
if __name__ == "__main__":
# Show the LCG in action.
a_ : List[Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 710 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : Tuple=2 , snake_case__ : int=3 , snake_case__ : Any=6_4 , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.random.default_rng(snake_case__ )
SCREAMING_SNAKE_CASE = length
SCREAMING_SNAKE_CASE = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
"""simple docstring"""
return self.length
def __getitem__( self : Dict , snake_case__ : int ):
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class UpperCamelCase ( torch.nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : List[Any]=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE = True
def UpperCamelCase ( self : List[Any] , snake_case__ : Tuple=None ):
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE = False
return x * self.a[0] + self.b[0]
class UpperCamelCase ( torch.nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : Tuple=0 , snake_case__ : int=0 , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
SCREAMING_SNAKE_CASE = True
def UpperCamelCase ( self : Tuple , snake_case__ : Any=None ):
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE = False
return x * self.a + self.b
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int = 16 ) -> Any:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE = load_dataset('csv' , data_files=_UpperCamelCase )
SCREAMING_SNAKE_CASE = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE = {v: i for i, v in enumerate(_UpperCamelCase )}
def tokenize_function(_UpperCamelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(_UpperCamelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCamelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_UpperCamelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(tokenized_datasets['train'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=2 )
SCREAMING_SNAKE_CASE = DataLoader(tokenized_datasets['validation'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 711 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
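# e.g. with sentence "abcde" and ngram_size=2 this returns the character
# bigrams ["ab", "bc", "cd", "de"].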
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
a_ : Optional[int] = pytest.mark.integration
a_ : Optional[int] = {"comet"}
a_ : List[str] = importlib.util.find_spec("fairseq") is not None
a_ : Tuple = {"code_eval"}
a_ : List[Any] = os.name == "nt"
a_ : Dict = {"bertscore", "frugalscore", "perplexity"}
a_ : Union[str, Any] = importlib.util.find_spec("transformers") is not None
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self : List[str] , _UpperCamelCase : int ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self , _UpperCamelCase )
return wrapper
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self : str , _UpperCamelCase : List[str] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self , _UpperCamelCase )
return wrapper
def __lowerCAmelCase ( _UpperCamelCase : int ) -> str:
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self : Union[str, Any] , _UpperCamelCase : Any ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self , _UpperCamelCase )
return wrapper
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@local
class UpperCamelCase ( parameterized.TestCase ):
__UpperCamelCase ={}
__UpperCamelCase =None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '[...]'
SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , snake_case__ ) ).module_path )
SCREAMING_SNAKE_CASE = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case__ )
# check parameters
SCREAMING_SNAKE_CASE = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(snake_case__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
SCREAMING_SNAKE_CASE = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '[...]'
SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , snake_case__ ) ).module_path )
# run doctest
with self.use_local_metrics():
SCREAMING_SNAKE_CASE = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case__ ):
yield
else:
yield
@contextmanager
def UpperCamelCase ( self : Any ):
"""simple docstring"""
def load_local_metric(snake_case__ : Tuple , *snake_case__ : int , **snake_case__ : List[str] ):
return load_metric(os.path.join('metrics' , snake_case__ ) , *snake_case__ , **snake_case__ )
with patch('datasets.load_metric' ) as mock_load_metric:
SCREAMING_SNAKE_CASE = load_local_metric
yield
@classmethod
def UpperCamelCase ( cls : Any , snake_case__ : Any ):
"""simple docstring"""
def wrapper(snake_case__ : Any ):
SCREAMING_SNAKE_CASE = contextmanager(snake_case__ )
SCREAMING_SNAKE_CASE = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
SCREAMING_SNAKE_CASE = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
import torch
def bert_cos_score_idf(_UpperCamelCase : List[Any] , _UpperCamelCase : Any , *_UpperCamelCase : Dict , **_UpperCamelCase : str ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCamelCase ) )
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
SCREAMING_SNAKE_CASE = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
def load_from_checkpoint(_UpperCamelCase : Any ):
class UpperCamelCase :
def UpperCamelCase ( self : str , snake_case__ : Any , *snake_case__ : List[Any] , **snake_case__ : Dict ):
"""simple docstring"""
assert len(snake_case__ ) == 2
SCREAMING_SNAKE_CASE = [0.19, 0.92]
return scores, sum(snake_case__ ) / len(snake_case__ )
return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download a comet model
with patch('comet.download_model' ) as mock_download_model:
SCREAMING_SNAKE_CASE = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
SCREAMING_SNAKE_CASE = load_from_checkpoint
yield
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_metric(os.path.join('metrics' , 'seqeval' ) )
SCREAMING_SNAKE_CASE = 'ERROR'
SCREAMING_SNAKE_CASE = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(_UpperCamelCase , match=re.escape(_UpperCamelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=_UpperCamelCase )
| 712 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
        SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda snake_case__ : snake_case__["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
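# Hedged usage sketch (checkpoint assumed; OWL-ViT is a common zero-shot detector):
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cats.png", candidate_labels=["cat", "remote control"])
#   # -> list of {"score", "label", "box"} dicts, sorted by descending score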
| 673 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Tuple = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="open-llama"
def __init__( self : Dict , snake_case__ : List[str]=1_0_0_0_0_0 , snake_case__ : Dict=4_0_9_6 , snake_case__ : Dict=1_1_0_0_8 , snake_case__ : Optional[int]=3_2 , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Any="silu" , snake_case__ : Optional[int]=2_0_4_8 , snake_case__ : Tuple=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : Dict=True , snake_case__ : List[str]=0 , snake_case__ : Optional[int]=1 , snake_case__ : int=2 , snake_case__ : Any=False , snake_case__ : Dict=True , snake_case__ : Dict=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Union[str, Any]=True , snake_case__ : Dict=True , snake_case__ : Dict=None , **snake_case__ : Optional[int] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = rms_norm_eps
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_dropout_prob
SCREAMING_SNAKE_CASE = use_stable_embedding
SCREAMING_SNAKE_CASE = shared_input_output_embedding
SCREAMING_SNAKE_CASE = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
F"""got {self.rope_scaling}""" )
SCREAMING_SNAKE_CASE = self.rope_scaling.get('type' , snake_case__ )
SCREAMING_SNAKE_CASE = self.rope_scaling.get('factor' , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 713 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
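# Worked example of the intended behaviour: power=15 gives 2**15 = 32768
# and 3 + 2 + 7 + 6 + 8 = 26.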
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 0 |
import random
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = a[left_index]
SCREAMING_SNAKE_CASE = left_index + 1
for j in range(left_index + 1 , _UpperCamelCase ):
if a[j] < pivot:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = a[i], a[j]
i += 1
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = a[i - 1], a[left_index]
return i - 1
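# Illustrative Lomuto-style partition around a[left_index]: for a=[3, 1, 4, 1, 5],
# left_index=0, right_index=5, the pivot 3 ends at index 2 ([1, 1] to its left,
# [4, 5] to its right) and the function returns 2.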
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : str ) -> str:
'''simple docstring'''
if left < right:
SCREAMING_SNAKE_CASE = random.randint(_UpperCamelCase , right - 1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
SCREAMING_SNAKE_CASE = partition(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
quick_sort_random(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_UpperCamelCase , pivot_index + 1 , _UpperCamelCase ) # recursive quicksort to the right of the pivot point
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n' ).strip()
SCREAMING_SNAKE_CASE = [int(_UpperCamelCase ) for item in user_input.split(',' )]
quick_sort_random(_UpperCamelCase , 0 , len(_UpperCamelCase ) )
print(_UpperCamelCase )
if __name__ == "__main__":
main()
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
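# Hedged usage sketch: tool = UpperCamelCase(); tool("I loved that movie", labels=["positive", "negative"])
# builds one "This example is {label}" hypothesis per label, scores each pair with the
# MNLI model, and returns the label with the highest entailment logit (presumably "positive").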
| 673 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
a_ : int = 5
a_ : Union[str, Any] = 10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =SpeechaTextTokenizer
__UpperCamelCase =False
__UpperCamelCase =True
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
SCREAMING_SNAKE_CASE = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 1_0_0_1 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['โThis', 'โis', 'โa', 'โt', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsรฉ.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'รฉ', '.'] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase ="valhalla/s2t_mustc_multilinguial_medium"
__UpperCamelCase ="C'est trop cool"
__UpperCamelCase ="Esto es genial"
@classmethod
def UpperCamelCase ( cls : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 1_1 )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
SCREAMING_SNAKE_CASE = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'fr'
SCREAMING_SNAKE_CASE = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 715 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
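# e.g. {"new@@": 5, "york": 6} -> {"new": 5, "york</w>": 6}; the special tokens
# "<s> <pad> </s> <unk>" are restored without the "</w>" suffix.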
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
    # note: the model dump is old; fairseq upgraded its model format some time
    # later and performs a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
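    # e.g. a key like "Berlin</w>" flips do_lower_case to False; a fully lowercase
    # vocab keeps the True default set above.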
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
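    # Typical invocation (paths are illustrative), run against a directory that
    # holds an unpacked fairseq checkpoint plus its dict.*.txt and bpecodes files:
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fsmt_checkpoint_path ./wmt19.ru-en/model4.pt \
    #       --pytorch_dump_folder_path ./wmt19-ru-en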
| 673 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"""Loading PyTorch weights from {pt_path}""")

        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    '''simple docstring'''

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        # checks whether `key` or `(model_prefix,) + key` exists in the random Flax state dict
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
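# Worked example (hypothetical key): a PyTorch linear weight
#   ("encoder", "layer_0", "dense", "weight") of shape (out_features, in_features)
# hits the linear-layer branch above and comes back as the Flax kernel
#   ("encoder", "layer_0", "dense", "kernel") of shape (in_features, out_features).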
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    '''simple docstring'''
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
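# Minimal usage sketch (model class and checkpoint path are illustrative):
#   import torch
#   from transformers import FlaxBertModel
#   flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
#   pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)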
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    '''simple docstring'''
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['params']

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats']))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('.'))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    '''simple docstring'''
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""")

    # import correct flax class
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """)

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    '''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('.')[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('.')[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_mean',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_var',)

        if "batch_stats" in flax_state:
            flax_key = '.'.join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = '.'.join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split('.')
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + '_g'
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + '_v'
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = '.'.join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'If your task is similar to the task the model of the checkpoint was trained on, '
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 716 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
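# Example: random_graph(4, 0.5) returns an adjacency-list dict over nodes 0..3,
# e.g. {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]} (the exact edges depend on the RNG
# state, so that particular output is illustrative only), while
# complete_graph(3) deterministically returns {0: [1, 2], 1: [0, 2], 2: [0, 1]}.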
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
a_ : Any = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
a_ : List[Any] = BASE_URL + "/user"
# https://github.com/settings/tokens
a_ : Any = os.environ.get("USER_TOKEN", "")
def __lowerCAmelCase ( _UpperCamelCase : str ) -> dict[Any, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'Authorization': f"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json()
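# The /user endpoint returns the authenticated user's profile as JSON; keys such
# as "login", "id" and "name" are typical, with the exact field set depending on
# the token's scopes.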
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 717 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = NystromformerModel.from_pretrained('uw-madison/nystromformer-512')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_end_to_end(self):
        """simple docstring"""
        sentence = 'the [MASK] of Belgium is Brussels'

        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')

        encoding = tokenizer(sentence, return_tensors='pt')

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), 'capital')
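# Note: the hard-coded expected_slice above acts as a numerical regression
# fingerprint for the pretrained checkpoint; atol=1e-4 leaves headroom for minor
# floating-point drift across hardware and library versions.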
| 673 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")


def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
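# Typical CLI usage (the `accelerate config` entry point dispatches to the
# parser above):
#   accelerate config                           # interactive prompts, default save location
#   accelerate config --config_file my_cfg.yaml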
if __name__ == "__main__":
main()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
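# The try/except above is the standard optional-dependency pattern: when torch
# or a recent-enough transformers is unavailable, dummy placeholder objects are
# exported in place of the real classes and raise an informative error only at
# call time, keeping `import diffusers` itself cheap and safe.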
| 673 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
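# Quick sanity check of the property above, using the default strides:
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320
# i.e. one encoder frame per 320 input samples, or 20 ms of 16 kHz audio.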
| 719 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        """simple docstring"""
        self.elements = []
        self.set = set()

    def minkey(self):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        """simple docstring"""
        return len(self.elements) == 0

    def put(self, item, priority):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """simple docstring"""
        return self.elements[0][1]

    def get(self):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
    '''simple docstring'''
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_a(P: TPos, goal: TPos):
    '''simple docstring'''
    # consistent heuristic divided (integer division) by the expansion counter t
    return consistent_heuristic(P, goal) // t


def heuristic_b(P: TPos, goal: TPos):
    '''simple docstring'''
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    '''simple docstring'''
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    '''simple docstring'''
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    '''simple docstring'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    '''simple docstring'''
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    '''simple docstring'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_b}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    '''simple docstring'''
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_anchor.append(get_s)
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
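# Background note: this is the shared multi-heuristic A* (MHA*) scheme -- one
# anchor queue driven by the consistent heuristic plus inadmissible queues that
# may expand states out of order; with the W1/W2 weights above, the cost of the
# returned path is bounded by W1 * W2 times the optimal cost.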
| 673 | 0 |
def hamming(n_element: int) -> list:
    '''simple docstring'''
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
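# Quick check: hamming(8) -> [1, 2, 3, 4, 5, 6, 8, 9]
# (the first eight 5-smooth numbers, generated in increasing order).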
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("-----------------------------------------------------")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
| 720 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
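# Worked example: with the default scale_factor=8 each dimension is divided by
# the 8x latent factor and rounded up to a multiple of 8, so
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(570, 770) -> (72, 104)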
def prepare_image(pil_image, w=512, h=512):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
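# prepare_image maps 8-bit RGB into [-1, 1] (0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0)
# and returns a (1, 3, h, w) float tensor, the layout the VQ encoder below expects.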
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
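    # Worked example: num_inference_steps=100 and strength=0.2 give init_timestep=20
    # and t_start=80, so only the final 20 scheduler steps run -- img2img denoises a
    # partially-noised version of the input instead of starting from pure noise.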
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """simple docstring"""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"""Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor""")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCamelCase ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        """simple docstring"""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
    def test_config_compatibility(self):
        """simple docstring"""
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config), self.test_file_path], env=os.environ.copy())
    def test_accelerate_test(self):
        """simple docstring"""
        execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy())
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase ="test-tpu"
__UpperCamelCase ="us-central1-a"
__UpperCamelCase ="ls"
__UpperCamelCase =["accelerate", "tpu-config"]
__UpperCamelCase ="cd /usr/share"
__UpperCamelCase ="tests/test_samples/test_command_file.sh"
__UpperCamelCase ="Running gcloud compute tpus tpu-vm ssh"
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=snake_case__ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    '''simple docstring'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
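# Note: load_weights re-applies weight normalisation first so the fairseq-style
# weight_g / weight_v pairs can be copied over verbatim, then removes it again,
# leaving ordinary fused conv weights in the converted HF checkpoint.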
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the ๐ค hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ : List[Any] = "src/diffusers"
a_ : Any = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    '''simple docstring'''
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$', line) is not None
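# _should_continue answers "is this line still inside the copied block?": yes while
# the line is more indented than the definition, empty, or a dangling `)` / `) -> T:` closer.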
def find_code_in_diffusers(object_name: str) -> str:
    '''simple docstring'''
    parts = object_name.split('.')
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")
    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py"""), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code = lines[start_index:line_index]
    return "".join(code)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    '''simple docstring'''
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S', lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    '''simple docstring'''
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n'):] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    '''simple docstring'''
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]):]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""")
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    '''simple docstring'''
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a_ : Optional[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
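    # Padding sketch: an input of length 3 right-padded to 5 turns a
    # global_attention_mask of [1, 0, 0] into [1, 0, 0, -1, -1]; `-1` marks padded
    # positions so they stay distinguishable from local attention (0).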
'''simple docstring'''
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
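# The scan skips 13 digits ahead whenever the current window cannot beat the best
# one seen so far; the digit product itself is just str_eval, e.g.
# str_eval("9989") == 9 * 9 * 8 * 9 == 5832.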
if __name__ == "__main__":
print(F"""{solution() = }""")
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    '''simple docstring'''
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
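# printflock serialises output across ranks by taking an exclusive flock on this
# script file itself, so lines printed by different processes never interleave.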
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
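# The table above renames fairseq state_dict keys to their HF counterparts; `*` is a
# placeholder for the transformer layer index that recursively_load_weights fills in,
# e.g. "self_attn.k_proj" inside layer 3 becomes "encoder.layers.3.attention.k_proj".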
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group')
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
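    # Layout sketch: a single sequence becomes <s> A </s>; a pair becomes
    # <s> A </s> B </s>, matching the 0/1 token_type_ids produced further below.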
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
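# A minimal instantiation sketch (the defaults above are assumed to mirror the
# MIT/ast-finetuned-audioset-10-10-0.4593 checkpoint listed in the archive map):
# config = ASTConfig()
# assert config.num_mel_bins == 128 and config.patch_size == 16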
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))
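# All three implementations agree, e.g.:
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15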
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup='import __main__')
        print(f"""{call:56} = {func(value)} -- {timing:.4f} seconds""")
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Any = "โ"
a_ : List[str] = {"vocab_file": "spiece.model"}
a_ : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
a_ : Any = {
"google/pegasus-xsum": 512,
}
a_ : Optional[int] = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str]="<pad>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : int="<unk>" , snake_case__ : str="<mask_2>" , snake_case__ : Tuple="<mask_1>" , snake_case__ : Tuple=None , snake_case__ : Dict=1_0_3 , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError(
F"""additional_special_tokens should be of type {type(snake_case__ )}, but is"""
F""" {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(snake_case__ ) , self.offset - 1 )
]
if len(set(snake_case__ ) ) != len(snake_case__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , pad_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
SCREAMING_SNAKE_CASE = mask_token_sent
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
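        # Id layout sketch with the default offset of 103: 0=<pad>, 1=</s>,
        # 2=<mask_1>, 3=<mask_2>, 4..104=<unk_2>..<unk_102>; real SentencePiece
        # pieces are shifted up by the offset (sp_id + self.offset below).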
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : str , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE = self.sp_model.piece_to_id(snake_case__ )
return sp_id + self.offset
def UpperCamelCase ( self : Optional[int] , snake_case__ : int ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCamelCase ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def UpperCamelCase ( self : Any , snake_case__ : Any=False ):
"""simple docstring"""
return 1
    def _special_token_mask(self, seq: List[str]):
        """Return a mask with 1 for special tokens (excluding <unk>, which is only sometimes special)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List , snake_case__ : Optional[List] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(snake_case__ )
elif token_ids_a is None:
return self._special_token_mask(snake_case__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : Optional[int]=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
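# --- Hedged sketch (not part of the original file): a minimal, self-contained
# illustration of the id layout the tokenizer above maintains. Ids below
# `offset` come from a plain dict of special tokens, and every SentencePiece id
# is shifted up by `offset`. The tiny vocab and offset value are made up.
def _demo_offset_id_layout():
    offset = 4
    encoder = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}
    decoder = {token: idx for idx, token in encoder.items()}
    sp_vocab = {"▁hello": 0, "▁world": 1}  # stands in for sp_model.piece_to_id
    def token_to_id(token):
        return decoder[token] if token in decoder else sp_vocab[token] + offset
    assert token_to_id("<pad>") == 0
    assert token_to_id("▁hello") == offset  # sp id 0 lands just past the specials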
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
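# --- Hedged sketch: `deprecate` above ultimately routes through the `warnings`
# module (the real helper also compares against the current version, which is
# omitted here). A minimal stand-in with the same ingredients could look like:
import warnings
def _tiny_deprecate(name, removed_in, message, stacklevel=3):
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removed_in}. {message}",
        FutureWarning,
        stacklevel=stacklevel,
    )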
| 673 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own flags plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the script's own argparse sees the forwarded arguments
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
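# --- Hedged sketch (standalone): the core trick above is patching sys.argv so
# the imported script's argparse sees the forwarded flags. The fake script
# name and flags below are illustrative only.
def _demo_argv_patching():
    def fake_training_main():
        return sys.argv[1:]  # stands in for mod._mp_fn reading flags via argparse
    old_argv = sys.argv
    sys.argv = ["training_script.py", "--epochs", "3", "--tpu_num_cores", "8"]
    try:
        assert fake_training_main() == ["--epochs", "3", "--tpu_num_cores", "8"]
    finally:
        sys.argv = old_argv  # always restore the caller's argv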
if __name__ == "__main__":
main()
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
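# --- Hedged note: the waveform-length assertions in the tests above reduce to
# num_samples = audio_length_in_s * sampling_rate; with the dummy vocoder's
# 16 kHz rate that is where the 256- and 512-sample figures come from.
def _demo_expected_waveform_length():
    sampling_rate = 16_000  # the rate configured in the dummy vocoder above
    assert round(0.016 * sampling_rate) == 256
    assert round(0.032 * sampling_rate) == 512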
| 673 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase =1
@register_to_config
def __init__( self : int , snake_case__ : int = 1_0_0_0 , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None ):
"""simple docstring"""
self.set_timesteps(snake_case__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE = 4
# running values
SCREAMING_SNAKE_CASE = []
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = num_inference_steps
SCREAMING_SNAKE_CASE = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE = timesteps.to(snake_case__ )
SCREAMING_SNAKE_CASE = []
def UpperCamelCase ( self : List[str] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : bool = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
SCREAMING_SNAKE_CASE = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE = timestep_index + 1
SCREAMING_SNAKE_CASE = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(snake_case__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
SCREAMING_SNAKE_CASE = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE = self._get_prev_sample(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : torch.FloatTensor , *snake_case__ : List[Any] , **snake_case__ : int ):
"""simple docstring"""
return sample
def UpperCamelCase ( self : Dict , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE = self.betas[timestep_index]
SCREAMING_SNAKE_CASE = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE = (sample - sigma * ets) / max(snake_case__ , 1E-8 )
SCREAMING_SNAKE_CASE = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Any ):
"""simple docstring"""
return self.config.num_train_timesteps
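# --- Hedged sketch: the `ets` branch in the step method above is a linear
# multistep (Adams-Bashforth style) combination of the stored history. The
# standalone helper below restates the same coefficients on plain floats; each
# coefficient set sums to 1, so a constant history passes through unchanged.
def _combine_ets(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
assert _combine_ets([1.0, 1.0, 1.0, 1.0]) == 1.0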
| 706 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask to match
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ : Dict = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
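# --- Hedged sketch: `_LazyModule` defers the heavy torch imports above until a
# name is first accessed. A much-simplified version of that idea, using only
# importlib and __getattr__, is shown below for illustration.
import importlib
class _TinyLazyModule:
    def __init__(self, name_to_module):
        self._name_to_module = name_to_module  # e.g. {"sqrt": "math"}
    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache: __getattr__ won't fire again for this name
        return value
# Usage: _TinyLazyModule({"sqrt": "math"}).sqrt(4.0) == 2.0, importing math lazily.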
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
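# --- Hedged sketch: PipelineTool subclasses like the one above are invoked as
# callables, and the base class chains encode -> forward -> decode (plus device
# placement, skipped here). The toy below mirrors only that three-stage flow.
class _ToyTool:
    def encode(self, text):
        return text.split()  # "pre-processor": raw input -> model inputs
    def forward(self, tokens):
        return [token.upper() for token in tokens]  # "model": inputs -> outputs
    def decode(self, tokens):
        return " ".join(tokens)  # "post-processor": outputs -> final answer
    def __call__(self, text):
        return self.decode(self.forward(self.encode(text)))
assert _ToyTool()("listen carefully") == "LISTEN CAREFULLY"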
| 673 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase ( yaml.SafeLoader ):
def UpperCamelCase ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE = [tuple(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else key for key in keys]
SCREAMING_SNAKE_CASE = Counter(snake_case__ )
SCREAMING_SNAKE_CASE = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCamelCase ( self : str , snake_case__ : str , snake_case__ : Any=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super().construct_mapping(snake_case__ , deep=snake_case__ )
self._check_no_duplicates_on_constructed_node(snake_case__ )
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its YAML front-matter block (if any) and the remaining body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yaml_block = "\n".join(full_content[1:sep_idx])
        return yaml_block, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
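# --- Hedged demo (self-contained): the splitter above replayed on a made-up
# README string, so the index arithmetic is easy to follow.
def _demo_split_front_matter():
    yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset\nBody text.")
    assert yaml_block == "license: mit"
    assert body == "# My dataset\nBody text."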
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
# class attributes
__UpperCamelCase ={"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , snake_case__ : Path ):
"""simple docstring"""
with open(snake_case__ , encoding='utf-8' ) as readme_file:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(snake_case__ )
else:
return cls()
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Path ):
"""simple docstring"""
if path.exists():
with open(snake_case__ , encoding='utf-8' ) as readme_file:
SCREAMING_SNAKE_CASE = readme_file.read()
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self._to_readme(snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(snake_case__ )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _split_yaml_from_readme(snake_case__ )
SCREAMING_SNAKE_CASE = '---\n' + self.to_yaml_string() + '---\n' + content
else:
SCREAMING_SNAKE_CASE = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def UpperCamelCase ( cls : Dict , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = yaml.load(snake_case__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**snake_case__ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=snake_case__ , allow_unicode=snake_case__ , encoding='utf-8' , ).decode('utf-8' )
a_ : int = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a_ : int = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
a_ : Optional[int] = ap.parse_args()
a_ : List[str] = Path(args.readme_filepath)
a_ : str = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
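# --- Hedged sketch: per the Banerjee & Lavie (2005) paper cited above, the
# final METEOR score combines a recall-weighted harmonic mean of unigram
# precision P and recall R with a fragmentation penalty. The helper below
# restates that combination on plain floats (illustrative numbers only).
def _meteor_style_combination(precision, recall, frag, alpha=0.9, beta=3, gamma=0.5):
    f_mean = precision * recall / (alpha * precision + (1 - alpha) * recall)
    penalty = gamma * frag**beta
    return f_mean * (1 - penalty)
# With no fragmentation the penalty vanishes: _meteor_style_combination(1, 1, 0) == 1.0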
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ : Tuple = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
def swish(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
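# --- Hedged quick check with illustrative values: swish(0) == 0, and
# swish(1) equals sigmoid(1) ~= 0.7310585786, since the gate multiplies x.
def _demo_swish_values():
    out = swish(np.array([0.0, 1.0]))
    assert out[0] == 0.0
    assert abs(out[1] - 0.7310585786300049) < 1e-9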
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class UpperCamelCase :
def __init__( self : Any , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : bool = True , snake_case__ : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = scheduler
SCREAMING_SNAKE_CASE = optimizers if isinstance(snake_case__ , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE = split_batches
SCREAMING_SNAKE_CASE = step_with_optimizer
SCREAMING_SNAKE_CASE = GradientState()
def UpperCamelCase ( self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Any ):
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*snake_case__ , **snake_case__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*snake_case__ , **snake_case__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE = AcceleratorState().num_processes
for _ in range(snake_case__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*snake_case__ , **snake_case__ )
else:
self.scheduler.step(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return self.scheduler.get_last_lr()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return self.scheduler.state_dict()
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
self.scheduler.load_state_dict(snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.scheduler.get_lr()
def UpperCamelCase ( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : List[str] ):
"""simple docstring"""
return self.scheduler.print_lr(*snake_case__ , **snake_case__ )
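# --- Hedged sketch: the wrapper above only steps the scheduler once gradients
# actually sync. The toy below mimics that contract with a plain counter
# instead of a real torch scheduler; all names are illustrative.
def _demo_accumulation_aware_stepping(accumulation_steps=4, num_batches=8):
    scheduler_steps = 0
    for batch_idx in range(num_batches):
        sync_gradients = (batch_idx + 1) % accumulation_steps == 0
        if sync_gradients:  # one scheduler step per *optimizer* step
            scheduler_steps += 1
    assert scheduler_steps == num_batches // accumulation_steps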
| 710 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
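# --- Hedged note: with the default strides [4, 2, 2, 2], each stage divides the
# spatial resolution, so a 224x224 input yields 56/28/14/7 feature maps. The
# check below is plain arithmetic, independent of the model code.
def _demo_van_stage_resolutions(image_size=224, strides=(4, 2, 2, 2)):
    size = image_size
    resolutions = []
    for stride in strides:
        size //= stride
        resolutions.append(size)
    assert resolutions == [56, 28, 14, 7]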
| 673 | 0 |
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the range
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
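# --- Hedged cross-check (stdlib): datetime answers the same question directly,
# since date(y, m, 1).weekday() == 6 means the month starts on a Sunday. Both
# counts come to 171 for 1901-2000.
def _count_sundays_with_datetime() -> int:
    from datetime import date
    return sum(
        date(year, month, 1).weekday() == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )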
if __name__ == "__main__":
print(solution())
| 711 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous character n-gram of length `ngram_size`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
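# --- Hedged usage example for the sliding-window helper above.
assert create_ngram("hello", 3) == ["hel", "ell", "llo"]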
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : list[list[int | float]] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = len(matrix[0] )
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , _UpperCamelCase )
for row in range(_UpperCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = matrix[col][row] / matrix[row][row]
for i in range(_UpperCamelCase , _UpperCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
SCREAMING_SNAKE_CASE = True
for i in range(row + 1 , _UpperCamelCase ):
if matrix[i][row] != 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = matrix[i], matrix[row]
SCREAMING_SNAKE_CASE = False
break
if reduce:
rank -= 1
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
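# --- Hedged cross-check: numpy computes rank via SVD, a convenient independent
# reference for small matrices (the example matrix below is made up).
def _demo_rank_cross_check():
    import numpy as np
    matrix = np.array([[1.0, 2.0], [2.0, 4.0]])  # second row = 2 * first row
    assert int(np.linalg.matrix_rank(matrix)) == 1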
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
        SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda x: x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
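# Editor's note (hedged usage sketch): this chunk pipeline backs the
# 'zero-shot-object-detection' task; the checkpoint and labels below are
# illustrative assumptions, and running the snippet downloads model weights.
# from transformers import pipeline
# detector = pipeline('zero-shot-object-detection', model='google/owlvit-base-patch32')
# detector(
#     'http://images.cocodataset.org/val2017/000000039769.jpg',
#     candidate_labels=['cat', 'remote control'],
# )
# -> [{'score': ..., 'label': ..., 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]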
| 673 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =XLMRobertaTokenizer
__UpperCamelCase =XLMRobertaTokenizerFast
__UpperCamelCase =True
__UpperCamelCase =True
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1_0_0_2 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(snake_case__ , keep_accents=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
        self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsรฉ.' )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'รฉ',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@cached_property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def UpperCamelCase ( self : int ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case__ , f.name )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=snake_case__ )
SCREAMING_SNAKE_CASE = pickle.dumps(snake_case__ )
pickle.loads(snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsรฉ.'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
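# Editor's note (hedged usage sketch): the assertions above exercise standard
# SentencePiece behaviour; with the public checkpoint, word-initial pieces
# carry the '▁' (U+2581) prefix and unknown characters map to '<unk>'.
# from transformers import XLMRobertaTokenizer
# tok = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
# tok.tokenize('This is a test')  # expected to resemble ['▁This', '▁is', '▁a', '▁test']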
| 713 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
    '''Project Euler 16: return the sum of the decimal digits of 2**power.'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Optional[int] = "โ"
a_ : Optional[Any] = {"vocab_file": "spiece.model"}
a_ : Dict = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
a_ : Optional[int] = {
"google/reformer-crime-and-punishment": 52_4288,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int]="</s>" , snake_case__ : Dict="<unk>" , snake_case__ : List[Any]=[] , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
@property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
return self.sp_model.piece_to_id(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(snake_case__ )
return token
def UpperCamelCase ( self : int , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
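# Editor's note (hedged usage sketch): loading the SentencePiece tokenizer
# defined above from its public checkpoint; requires the `sentencepiece`
# package and downloads the model file.
# from transformers import ReformerTokenizer
# tok = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
# tok.tokenize('Hello world')  # pieces are prefixed with '▁' (U+2581)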
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
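# Editor's note (hedged usage sketch): transformers ships this tool as
# `TextClassificationTool`; the import path and labels below are assumptions,
# and instantiation downloads the facebook/bart-large-mnli checkpoint.
# from transformers.tools import TextClassificationTool
# classifier = TextClassificationTool()
# classifier('This is a super nice API!', labels=['positive', 'negative'])  # -> 'positive'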
| 673 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Any = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a_ : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ : Tuple = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ : str = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
a_ : int = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
a_ : Optional[Any] = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
a_ : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
a_ : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
a_ : int = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =DPRContextEncoderTokenizer
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =DPRQuestionEncoderTokenizer
a_ : Tuple = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
a_ : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a_ : int = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase :
def __call__( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Union[bool, str] = False , snake_case__ : Union[bool, str] = False , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Optional[bool] = None , **snake_case__ : str , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE = titles if texts is None else texts
return super().__call__(
snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = titles if not isinstance(snake_case__ , snake_case__ ) else [titles]
SCREAMING_SNAKE_CASE = texts if not isinstance(snake_case__ , snake_case__ ) else [texts]
SCREAMING_SNAKE_CASE = len(snake_case__ )
SCREAMING_SNAKE_CASE = questions if not isinstance(snake_case__ , snake_case__ ) else [questions] * n_passages
assert len(snake_case__ ) == len(
snake_case__ ), F"""There should be as many titles than texts but got {len(snake_case__ )} titles and {len(snake_case__ )} texts."""
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ )['input_ids']
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ )['input_ids']
SCREAMING_SNAKE_CASE = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case__ , snake_case__ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE = attention_mask
return self.pad(snake_case__ , padding=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : BatchEncoding , snake_case__ : DPRReaderOutput , snake_case__ : int = 1_6 , snake_case__ : int = 6_4 , snake_case__ : int = 4 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = reader_input['input_ids']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3]
SCREAMING_SNAKE_CASE = len(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(range(snake_case__ ) , reverse=snake_case__ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE = len(snake_case__ )
SCREAMING_SNAKE_CASE = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case__ , top_spans=snake_case__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case__ , start_index=snake_case__ , end_index=snake_case__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : List[int] , snake_case__ : int , snake_case__ : int , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for start_index, start_score in enumerate(snake_case__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda x: x[1] , reverse=snake_case__ )
SCREAMING_SNAKE_CASE = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
SCREAMING_SNAKE_CASE = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =READER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =["input_ids", "attention_mask"]
__UpperCamelCase =DPRReaderTokenizer
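# Editor's note (hedged sketch) of the span-scoring idea in `_get_best_spans`
# above: every (start, end) window up to a maximum answer length is scored as
# start_logit + end_logit and the best-scoring window wins.
def best_span(start_logits: list[float], end_logits: list[float], max_answer_length: int) -> tuple[int, int]:
    scores = [
        ((start, start + length), start_logits[start] + end_logits[start + length])
        for start in range(len(start_logits))
        for length in range(min(max_answer_length, len(end_logits) - start))
    ]
    return max(scores, key=lambda item: item[1])[0]
assert best_span([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], 2) == (1, 2)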
| 715 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
    '''Rewrite fairseq vocab keys: strip the trailing "@@" BPE continuation marker, append "</w>" to word-final tokens, and restore the special tokens.'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
    '''Convert a fairseq FSMT checkpoint (weights, dicts and bpecodes) into a transformers FSMT dump at ``pytorch_dump_folder_path``.'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
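# Editor's note (hedged invocation sketch): the converter above is a CLI
# script; a typical call looks like the following, with illustrative paths.
#   python <this_script>.py \
#       --fsmt_checkpoint_path ./wmt19-ru-en/model.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en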
| 673 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase :
__UpperCamelCase =BlenderbotSmallConfig
__UpperCamelCase ={}
__UpperCamelCase ="gelu"
def __init__( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[Any]=1_3 , snake_case__ : List[Any]=7 , snake_case__ : str=True , snake_case__ : Optional[Any]=False , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Optional[int]=3_2 , snake_case__ : str=2 , snake_case__ : str=4 , snake_case__ : Dict=3_7 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[int]=2_0 , snake_case__ : int=2 , snake_case__ : str=1 , snake_case__ : Union[str, Any]=0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , ) -> Union[str, Any]:
    '''Fill in default attention masks and head masks for any inputs that were not provided.'''
if attention_mask is None:
SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCamelCase =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase =(
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =[
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
__UpperCamelCase ="facebook/blenderbot_small-90M"
@cached_property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , return_tensors='tf' )
SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
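# Editor's note (hedged usage sketch): the integration test above mirrors this
# ordinary generate() flow; running it downloads the 90M checkpoint.
# from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration
# tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M')
# model = TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M')
# inputs = tok(['Social anxiety. Do you have anxiety?'], return_tensors='tf')
# ids = model.generate(**inputs, num_beams=2)
# tok.batch_decode(ids, skip_special_tokens=True)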
| 716 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
    '''Generate a random graph on the given number of nodes where each possible edge is added independently with the given probability.'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is smaller than the given probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
    '''Generate the complete graph on the given number of vertices.'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
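# Editor's note (worked example): with probability >= 1 the generator above
# always returns the complete graph, e.g. for 4 nodes:
# {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}
# Seeding the module-level `random` (random.seed(0)) before a call makes the
# probabilistic branch reproducible between runs.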
| 673 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        """simple docstring"""
        outputs = generator('Something there' )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
        outputs = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
            ] , )
        outputs = generator(
            ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
            ] , )
        with self.assertRaises(ValueError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there' , do_sample=False )
        self.assertEqual(outputs , [{'generated_text': ''}] )
        num_return_sequences = 3
        outputs = generator(
            'Something there' , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator('This is a test' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'generated_token_ids': ANY(torch.Tensor )},
                {'generated_token_ids': ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(
            ['This is a test', 'This is a second test'] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        """simple docstring"""
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there' , do_sample=False )
        self.assertEqual(outputs , [{'generated_text': ''}] )
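        # Note: with do_sample=False the tiny random checkpoint decodes greedily and
        # deterministically, which is why the PT and TF tests above can pin the
        # exact (here empty) generated strings.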
| 717 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NystromformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NystromformerForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = NystromformerForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping =(
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning =False
    test_headmasking =False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class NystromformerModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_masked_lm_end_to_end( self ):
        """simple docstring"""
        sentence = 'the [MASK] of Belgium is Brussels'
        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
        encoding = tokenizer(sentence , return_tensors='pt' )
        with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , 'capital' )
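        # The [MASK] in "the [MASK] of Belgium is Brussels" sits at token index 2
        # ([CLS] and "the" precede it), so the argmax over token_logits[:, 2, :]
        # decodes the model's fill-in, expected to be "capital".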
| 673 | 0 |
def solution ( ) -> int:
    '''simple docstring'''
    return [
        a * b * (10_00 - a - b)
        for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
        if (a * a + b * b == (10_00 - a - b) ** 2)
    ][0]
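# Project Euler problem 9: the only Pythagorean triplet with a + b + c = 1000
# is (200, 375, 425), so solution() evaluates to 200 * 375 * 425 = 31_875_000.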
if __name__ == "__main__":
print(F"""{solution() = }""")
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
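# Note: the try/except above makes the package degrade gracefully -- when torch or
# transformers are missing, dummy placeholder objects (which raise a helpful error
# on first use) are exported instead of the real UniDiffuser pipeline classes.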
| 673 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
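        # A quick sketch of what the mixin receives (given the shapes above):
        # dummy_input["sample"] is a (4, 3, 32, 32) jax array drawn from U[0, 1),
        # and init_dict builds a small two-block FlaxAutoencoderKL with 4 latent channels.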
| 719 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
    def __init__( self ):
        """simple docstring"""
        self.elements = []
        self.set = set()
    def minkey( self ):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf' )
    def empty( self ):
        """simple docstring"""
        return len(self.elements ) == 0
    def put( self , item , priority ):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri , x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro , x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ):
        """simple docstring"""
        return self.elements[0][1]
    def get( self ):
        """simple docstring"""
        (priority , item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
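# Usage sketch: the queue keeps at most one entry per item and re-heapifies on updates.
# pq = PriorityQueue()
# pq.put((0, 0), 5); pq.put((1, 1), 2); pq.put((0, 0), 1)
# pq.minkey() -> 1; pq.get() -> (1, (0, 0))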
def consistent_heuristic ( P : TPos , goal : TPos ) -> float:
    '''simple docstring'''
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1 ( P : TPos , goal : TPos ) -> float:
    '''simple docstring'''
    return consistent_heuristic(P , goal ) // t
def heuristic_2 ( p : TPos , goal : TPos ) -> int:
    '''simple docstring'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key ( start : TPos , i : int , goal : TPos , g_function : dict[TPos, float] ) -> float:
    '''simple docstring'''
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
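# key() is the Multi-Heuristic A* priority: f_i(s) = g(s) + Wa * h_i(s, goal),
# where h_0 is the consistent (euclidean) anchor heuristic and h_1, h_2 are
# the inadmissible ones; a larger Wa inflates the heuristic term.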
def do_something ( back_pointer : dict , goal : TPos , start : TPos ) -> None:
    '''simple docstring'''
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '*'
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'
    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c , y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=' ' )
                print('<-- End position' , end=' ' )
            else:
                print(grid[i][j] , end=' ' )
        print()
    print('^' )
    print('Start position' )
    print()
    print('# is an obstacle' )
    print('- is the path taken by algorithm' )
    print('PATH TAKEN BY THE ALGORITHM IS:-' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=' ' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid ( p : TPos ) -> bool:
    '''simple docstring'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state ( s : TPos , j : int , visited : set , g_function : dict , close_list_anchor : list , close_list_inad : list , open_list : list , back_pointer : dict , ) -> None:
    '''simple docstring'''
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x , y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf' )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= Wa * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground ( ) -> list:
    '''simple docstring'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star ( start : TPos , goal : TPos , n_heuristic : int ) -> None:
    '''simple docstring'''
    g_function = {start: 0, goal: float('inf' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf' ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 0 |
def gcd ( a : int , b : int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def mod_inverse ( a : int , m : int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
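# Example (a minimal sketch of the expected behaviour):
# gcd(24, 40) == 8
# mod_inverse(7, 26) == 15, since (7 * 15) % 26 == 1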
| 720 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width ( height : int , width : int , scale_factor : int = 8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
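# e.g. downscale_height_and_width(7_68 , 7_68 ) == (96, 96) with the default
# scale_factor of 8: each 768px side maps to a 12-cell latent grid, scaled back by 8.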
def prepare_image ( pil_image , w=5_12 , h=5_12 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 1_27.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
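# prepare_image returns a float tensor of shape (1, 3, h, w) scaled to [-1, 1],
# i.e. the value range the movq encoder below expects.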
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
    def __init__( self , unet : UNetaDConditionModel , scheduler : DDPMScheduler , movq : VQModel , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                    F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
a_ : Optional[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
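# Note: sys.modules[__name__] is replaced by a _LazyModule here, so a name such as
# ASTConfig is only actually imported the first time the attribute is accessed.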
| 721 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def load_weights ( checkpoint : dict , hf_model : SpeechTaHifiGan , config : SpeechTaHifiGanConfig ) -> None:
    '''simple docstring'''
    # the assignment targets below follow the SpeechTaHifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post)
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint ( checkpoint_path : str , stats_path : str , pytorch_dump_folder_path : str , config_path=None , repo_id=None , ) -> None:
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
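# Hypothetical invocation (all paths below are placeholders, not real files):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan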
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the ๐ค hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger("transformers.models.speecht5")
__lowerCAmelCase : List[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__lowerCAmelCase : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__lowerCAmelCase : List[Any] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__lowerCAmelCase : Optional[int] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__lowerCAmelCase : int = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__lowerCAmelCase : Any = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__lowerCAmelCase : Dict = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__lowerCAmelCase : List[Any] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__lowerCAmelCase : List[str] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__lowerCAmelCase : int = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCAmelCase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : Dict = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__lowerCAmelCase : Tuple = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__lowerCAmelCase : Union[str, Any] = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__lowerCAmelCase : Dict = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore ( name , ignore_keys ):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights ( fairseq_dict , hf_model , task ):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint ( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["""model"""] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the ๐ค hub."
)
__lowerCAmelCase : Any = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
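        # ids 0 and 1 are <pad> and </s>; ids 2-104 hold the 103 mask/<unk_*>
        # specials (hence offset == 103), so the underlying SentencePiece
        # <unk> piece (id 2 in that model) surfaces at 2 + 103 == 105.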
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
import numpy as np
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1e-12 , lowerCamelCase__ = 100 , ):
"""simple docstring"""
assert np.shape(lowerCamelCase__ )[0] == np.shape(lowerCamelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(lowerCamelCase__ )[0] == np.shape(lowerCamelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCamelCase__ ) == np.iscomplexobj(lowerCamelCase__ )
lowerCAmelCase__ = np.iscomplexobj(lowerCamelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCamelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1e12
while not convergence:
        # Multiply the matrix by the vector.
lowerCAmelCase__ = np.dot(lowerCamelCase__ , lowerCamelCase__ )
# Normalize the resulting output vector.
lowerCAmelCase__ = w / np.linalg.norm(lowerCamelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCAmelCase__ = vector.conj().T if is_complex else vector.T
lowerCAmelCase__ = np.dot(lowerCamelCase__ , np.dot(lowerCamelCase__ , lowerCamelCase__ ) )
# Check convergence.
lowerCAmelCase__ = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCAmelCase__ = True
lowerCAmelCase__ = lambda_
if is_complex:
lowerCAmelCase__ = np.real(lambda_ )
return lambda_, vector
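# Minimal usage sketch (matrix chosen for illustration, not taken from the test below):
#   >>> a = np.array([[2.0, 1.0], [1.0, 2.0]])
#   >>> eigenvalue, eigenvector = power_iteration(a, np.array([1.0, 0.0]))
#   >>> round(float(eigenvalue), 6)  # the dominant eigenvalue of a is 3
#   3.0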
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
lowerCAmelCase__ = np.array([41, 4, 20] )
    lowerCAmelCase__ = real_input_matrix.astype(np.complex128 )
lowerCAmelCase__ = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    lowerCAmelCase__ = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
lowerCAmelCase__ = real_input_matrix
lowerCAmelCase__ = real_vector
elif problem_type == "complex":
lowerCAmelCase__ = complex_input_matrix
lowerCAmelCase__ = complex_vector
# Our implementation.
lowerCAmelCase__ , lowerCAmelCase__ = power_iteration(lowerCamelCase__ , lowerCamelCase__ )
# Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
lowerCAmelCase__ , lowerCAmelCase__ = np.linalg.eigh(lowerCamelCase__ )
# Last eigenvalue is the maximum one.
lowerCAmelCase__ = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
lowerCAmelCase__ = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take the element-wise absolute value of each eigenvector,
        # since eigenvectors are only unique up to sign.
assert np.linalg.norm(np.abs(lowerCamelCase__ ) - np.abs(lowerCamelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = "▁"
__lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCAmelCase : Dict = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
__lowerCAmelCase : Optional[int] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
__lowerCAmelCase : str = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[int] = ["input_ids", "attention_mask"]
UpperCamelCase_ : List[int] = []
UpperCamelCase_ : List[int] = []
def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="</s>" , snake_case__ : str="<s>" , snake_case__ : Any="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : Optional[int]="<mask>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ):
        # Mask token behaves like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case__ , tgt_lang=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
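        # e.g. the spm piece "," has id 3, so its fairseq/HF id becomes
        # 3 + fairseq_offset == 4, matching the alignment table above.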
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else """en_XX"""
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : str ):
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ):
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : Tuple , snake_case__ : Dict ):
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str ):
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[int] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = """"""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(snake_case__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , """wb""" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : Optional[str] , snake_case__ : Optional[str] , **snake_case__ : int ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(snake_case__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] , snake_case__ : str = "en_XX" , snake_case__ : Optional[List[str]] = None , snake_case__ : str = "ro_RO" , **snake_case__ : Tuple , ):
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
        return super().prepare_seq2seq_batch(snake_case__ , snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
return self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : str ):
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : str ):
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 674 | """simple docstring"""
from math import pi, sqrt
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_71.5:
raise OverflowError("""math range error""" )
elif num - int(lowerCamelCase__ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
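# Worked half-integer example: gamma(2.5) recurses as
#   1.5 * gamma(1.5) == 1.5 * 0.5 * gamma(0.5) == 0.75 * sqrt(pi) ~= 1.329340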
def _UpperCAmelCase ( ):
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCAmelCase : Dict = 1.0
while num:
__lowerCAmelCase : Any = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 1 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
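# set_param copies a trax/numpy weight tensor (and an optional bias) into a
# torch layer, asserting first that the destination parameter shapes match.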
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
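        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 + 2 == 227 tokens per image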
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : Optional[Any] = 3
class a_ ( __UpperCamelCase ):
pass
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
for shard in shards:
for i in range(lowerCamelCase__ ):
yield {"i": i, "shard": shard}
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = int(os.environ["""RANK"""] )
lowerCAmelCase__ = int(os.environ["""WORLD_SIZE"""] )
lowerCAmelCase__ = ArgumentParser()
parser.add_argument("""--streaming""" , type=lowerCamelCase__ )
parser.add_argument("""--local_rank""" , type=lowerCamelCase__ )
parser.add_argument("""--num_workers""" , type=lowerCamelCase__ , default=0 )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.streaming
lowerCAmelCase__ = args.num_workers
lowerCAmelCase__ = {"""shards""": [f"""shard_{shard_idx}""" for shard_idx in range(lowerCamelCase__ )]}
lowerCAmelCase__ = IterableDataset.from_generator(lowerCamelCase__ , gen_kwargs=lowerCamelCase__ )
if not streaming:
lowerCAmelCase__ = Dataset.from_list(list(lowerCamelCase__ ) )
lowerCAmelCase__ = split_dataset_by_node(lowerCamelCase__ , rank=lowerCamelCase__ , world_size=lowerCamelCase__ )
lowerCAmelCase__ = torch.utils.data.DataLoader(lowerCamelCase__ , num_workers=lowerCamelCase__ )
lowerCAmelCase__ = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowerCAmelCase__ = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
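    # Worked example: NUM_SHARDS * NUM_ITEMS_PER_SHARD == 12 items in total;
    # with world_size=5, ranks 0-1 expect 12 // 5 + 1 == 3 items and ranks 2-4 expect 2.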
lowerCAmelCase__ = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
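# Typical launch (command is illustrative; the script filename is assumed):
#   torchrun --nproc_per_node=2 test_split_dataset_by_node.py --streaming True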
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
return (pow(lowerCamelCase__ , 2 ) + step) % modulus
for _ in range(lowerCamelCase__ ):
# These track the position within the cycle detection logic.
lowerCAmelCase__ = seed
lowerCAmelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCAmelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
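# Example: 187 == 11 * 17, so pollard_rho(187) returns one of those prime
# factors, while a prime input such as 13 exhausts all attempts and yields None.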
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
__lowerCAmelCase : List[str] = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
__lowerCAmelCase : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowerCAmelCase : List[Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowerCAmelCase : Tuple = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
assert len(str(lowerCamelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
lowerCAmelCase__ = year // 100
lowerCAmelCase__ = (5 * (century % 4) + 2) % 7
lowerCAmelCase__ = year % 100
lowerCAmelCase__ = centurian % 12
lowerCAmelCase__ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowerCAmelCase__ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
lowerCAmelCase__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
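# Worked example: for 2020-10-24, dooms_day == 6 (Saturday), day_anchor ==
# DOOMSDAY_LEAP[9] == 3, so week_day == (6 + 24 - 3) % 7 == 6, i.e. "Saturday".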
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCAmelCase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = True
# hparam_utils.py hparams
lowerCAmelCase__ = 0.66_46_94
lowerCAmelCase__ = 0.20_79_51
lowerCAmelCase__ = 0.12_11_94
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 0.0_35_25_13
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = False
# hparam_utils.py hparams
lowerCAmelCase__ = 36.45_19
lowerCAmelCase__ = 0.90_34_21
lowerCAmelCase__ = 2_22.0_88
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 0.76_31_41
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 674 | 1 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase__ = int(last % last )
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
"""simple docstring"""
lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
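# A hedged brute-force cross-check of the recurrence above; `brute_force_count`
# is illustrative and not part of the original solution. It counts, by direct
# recursion, the tilings of a row built from gray unit squares plus at least
# one coloured tile of exactly `tile_length` units.
def brute_force_count(row_length, tile_length):
    def fill(remaining, used_colour):
        if remaining == 0:
            return 1 if used_colour else 0
        total = fill(remaining - 1, used_colour)  # place a gray unit square
        if remaining >= tile_length:
            total += fill(remaining - tile_length, True)  # place a coloured tile
        return total
    return fill(row_length, False)

# The problem statement's worked example: a row of five units admits exactly
# seven tilings that use the length-2 tile.
assert brute_force_count(5, 2) == 7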
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self : Dict , snake_case__ : Tuple , snake_case__ : str=13 , snake_case__ : List[str]=32 , snake_case__ : List[Any]=3 , snake_case__ : Tuple=4 , snake_case__ : Optional[Any]=[10, 20, 30, 40] , snake_case__ : int=[2, 2, 3, 2] , snake_case__ : Optional[int]=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : int=10 , snake_case__ : Optional[int]=0.02 , snake_case__ : int=["stage2", "stage3", "stage4"] , snake_case__ : Tuple=[2, 3, 4] , snake_case__ : List[Any]=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = num_stages
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = depths
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = out_features
lowerCAmelCase__ = out_indices
lowerCAmelCase__ = scope
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Optional[int] ):
lowerCAmelCase__ = ConvNextVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] ):
lowerCAmelCase__ = ConvNextVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str] ):
lowerCAmelCase__ = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase__ = None
lowerCAmelCase__ = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : List[str] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = False
UpperCamelCase_ : Any = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Tuple = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ConvNextVaModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase__ = True
if model_class.__name__ in [
*get_values(snake_case__ ),
*get_values(snake_case__ ),
]:
continue
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = model(**snake_case__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Any ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase__ = False
lowerCAmelCase__ = True
if (
model_class.__name__
in [*get_values(snake_case__ ), *get_values(snake_case__ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = model(**snake_case__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
def check_hidden_states_output(snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : List[str] ):
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = ConvNextVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(snake_case__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = preprocessor(images=snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase__ = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = hf_param.shape
lowerCAmelCase__ = to_torch(params[gluon_param] )
lowerCAmelCase__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ = layer.attention.self
lowerCAmelCase__ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCAmelCase__ = layer.attention.output
lowerCAmelCase__ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCAmelCase__ = layer.intermediate
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCAmelCase__ = layer.output
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
lowerCAmelCase__ = mx.nd.array([input_ids] )
lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]
lowerCAmelCase__ = output_gluon[0].asnumpy()
lowerCAmelCase__ = output_hf[0].detach().numpy()
lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
if success:
print("""โ๏ธ Both model do output the same tensors""" )
else:
print("""โ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 674 | 1 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a_ ( unittest.TestCase ):
UpperCamelCase_ : List[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
lowerCAmelCase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowerCAmelCase__ = VideoClassificationPipeline(model=snake_case__ , image_processor=snake_case__ , top_k=2 )
lowerCAmelCase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Optional[int] , snake_case__ : str ):
for example in examples:
lowerCAmelCase__ = video_classifier(snake_case__ )
self.assertEqual(
snake_case__ , [
{"""score""": ANY(snake_case__ ), """label""": ANY(snake_case__ )},
{"""score""": ANY(snake_case__ ), """label""": ANY(snake_case__ )},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowerCAmelCase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowerCAmelCase__ = pipeline(
"""video-classification""" , model=snake_case__ , feature_extractor=snake_case__ , frame_sampling_rate=4 )
lowerCAmelCase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowerCAmelCase__ = video_classifier(snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
lowerCAmelCase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : int ):
pass
| 674 | """simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
lowerCAmelCase__ = last - int(last )  # fractional part of the stretched level, used for rounding
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCAmelCase : Dict = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Dict = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str , snake_case__ : bool , snake_case__ : str = None , snake_case__ : list = None ):
lowerCAmelCase__ = None
lowerCAmelCase__ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
lowerCAmelCase__ = os.path.abspath("""examples""" )
for item in os.listdir(snake_case__ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase__ = os.path.join(snake_case__ , snake_case__ )
if os.path.isfile(snake_case__ ) and ".py" in item_path:
with self.subTest(
tested_script=snake_case__ , feature_script=snake_case__ , tested_section="""main()""" if parser_only else """training_function()""" , ):
lowerCAmelCase__ = compare_against_test(
os.path.join(snake_case__ , snake_case__ ) , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = """\n""".join(snake_case__ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase__ = diff.replace(snake_case__ , """""" )
self.assertEqual(snake_case__ , """""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self.one_complete_example("""complete_nlp_example.py""" , snake_case__ )
self.one_complete_example("""complete_nlp_example.py""" , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
lowerCAmelCase__ = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , snake_case__ , snake_case__ , snake_case__ )
self.one_complete_example("""complete_cv_example.py""" , snake_case__ , snake_case__ , snake_case__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ):
super().setUpClass()
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ )
self.assertNotIn("""epoch 0:""" , snake_case__ )
self.assertIn("""epoch 1:""" , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ )
if torch.cuda.is_available():
lowerCAmelCase__ = torch.cuda.device_count()
else:
lowerCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , snake_case__ )
self.assertIn("""epoch 1:""" , snake_case__ )
else:
self.assertIn("""epoch 0:""" , snake_case__ )
self.assertIn("""epoch 1:""" , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ )
lowerCAmelCase__ = re.findall("""({.+})""" , snake_case__ )
lowerCAmelCase__ = [r for r in results if """accuracy""" in r][-1]
lowerCAmelCase__ = ast.literal_eval(snake_case__ )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , """tracking""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
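# A hedged usage sketch (the call pattern below is an assumption, not a
# confirmed API): as a PipelineTool, invoking the instance is expected to run
# encode -> forward -> decode in turn.
#
# tool = a_()                                   # the tool class defined above
# mask = tool(image=Image.open("photo.png"), label="cat")   # hypothetical call
# mask.save("cat_mask.png")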
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = str(lowerCamelCase__ )
return len(lowerCamelCase__ ) == 9 and set(lowerCamelCase__ ) == set("""123456789""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
for base_num in range(9999 , 4999 , -1 ):
lowerCAmelCase__ = 10_0002 * base_num
if is_9_pandigital(lowerCamelCase__ ):
return candidate
for base_num in range(333 , 99 , -1 ):
lowerCAmelCase__ = 100_2003 * base_num
if is_9_pandigital(lowerCamelCase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
_validate_point(lowerCamelCase__ )
_validate_point(lowerCamelCase__ )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ) ) )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if point:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
for item in point:
if not isinstance(lowerCamelCase__ , (int, float) ):
lowerCAmelCase__ = (
"""Expected a list of numbers as input, found """
f"""{type(lowerCamelCase__ ).__name__}"""
)
raise TypeError(lowerCamelCase__ )
else:
lowerCAmelCase__ = f"""Expected a list of numbers as input, found {type(lowerCamelCase__ ).__name__}"""
raise TypeError(lowerCamelCase__ )
else:
raise ValueError("""Missing an input""" )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
_validate_point(lowerCamelCase__ )
_validate_point(lowerCamelCase__ )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(lowerCamelCase__ , lowerCamelCase__ ) ) )
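# Worked example: for a = [1, 1] and b = [2, 2] both implementations above
# return |1 - 2| + |1 - 2| = 2.0.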
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
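# A minimal end-to-end sketch of the exchange the class above implements,
# written directly against the `primes` table so it does not depend on the
# class API; every name below is illustrative. `shaaaa` is the hash function
# imported at the top of this file.
def _dh_exchange_sketch(group=14):
    prime = primes[group]["prime"]
    generator = primes[group]["generator"]
    # each side draws a 256-bit private exponent
    alice_private = int(hexlify(urandom(32)), base=16)
    bob_private = int(hexlify(urandom(32)), base=16)
    # public keys: g^a mod p and g^b mod p
    alice_public = pow(generator, alice_private, prime)
    bob_public = pow(generator, bob_private, prime)
    # the NIST SP800-56 style check used above: the key is in [2, p - 2] and
    # lies in the prime-order subgroup
    assert 2 <= bob_public <= prime - 2 and pow(bob_public, (prime - 1) // 2, prime) == 1
    # both sides arrive at the same shared secret g^(a*b) mod p
    alice_shared = pow(bob_public, alice_private, prime)
    bob_shared = pow(alice_public, bob_private, prime)
    assert alice_shared == bob_shared
    return shaaaa(str(alice_shared).encode()).hexdigest()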
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[str] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : str = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = "bert"
def __init__( self : int , snake_case__ : Any=30522 , snake_case__ : Union[str, Any]=768 , snake_case__ : Any=12 , snake_case__ : Optional[int]=12 , snake_case__ : str=3072 , snake_case__ : int="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Optional[Any]=512 , snake_case__ : Dict=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : str=1E-12 , snake_case__ : List[str]=0 , snake_case__ : int="absolute" , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=None , **snake_case__ : Optional[int] , ):
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = position_embedding_type
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = classifier_dropout
class a_ ( __UpperCamelCase ):
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
if self.task == "multiple-choice":
lowerCAmelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 674 | """simple docstring"""
import os
from math import logaa
def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
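    # compare exponent * log10(base) instead of computing base**exponent; the log preserves the ordering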
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , lowerCamelCase__ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
lowerCAmelCase__ = x * logaa(lowerCamelCase__ )
lowerCAmelCase__ = i + 1
return result
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
def __init__( self : int , snake_case__ : Any , snake_case__ : List[str]=13 , snake_case__ : int=32 , snake_case__ : Dict=2 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[Any]=16 , snake_case__ : Tuple=[1, 2, 1] , snake_case__ : Optional[int]=[2, 2, 4] , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=2.0 , snake_case__ : List[str]=True , snake_case__ : Optional[int]=0.0 , snake_case__ : Dict=0.0 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : int=False , snake_case__ : List[str]=True , snake_case__ : List[Any]=0.02 , snake_case__ : Tuple=1E-5 , snake_case__ : Union[str, Any]=True , snake_case__ : str=None , snake_case__ : List[Any]=True , snake_case__ : str=10 , snake_case__ : Dict=8 , snake_case__ : List[Any]=["stage1", "stage2", "stage3"] , snake_case__ : List[str]=[1, 2, 3] , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = use_absolute_embeddings
lowerCAmelCase__ = patch_norm
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = is_training
lowerCAmelCase__ = scope
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = encoder_stride
lowerCAmelCase__ = out_features
lowerCAmelCase__ = out_indices
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Any ):
lowerCAmelCase__ = MaskFormerSwinModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[int] ):
lowerCAmelCase__ = MaskFormerSwinBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case__ ):
lowerCAmelCase__ = ["""stem"""]
lowerCAmelCase__ = MaskFormerSwinBackbone(config=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = MaskFormerSwinModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Any ):
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# Swin has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
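        # helper that zeroes out NaNs so tuple and dict outputs can be compared with allclose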
def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = 0
return t
def check_equivalence(snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : int={} ):
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ )
lowerCAmelCase__ = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple()
def recursive_check(snake_case__ : Dict , snake_case__ : int ):
if isinstance(snake_case__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ):
recursive_check(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case__ , snake_case__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. Dict has"""
F""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}."""
) , )
recursive_check(snake_case__ , snake_case__ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"""output_hidden_states""": True} )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"""output_hidden_states""": True} )
@require_torch
class a_ ( unittest.TestCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] = MaskFormerSwinConfig
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = MaskFormerSwinModelTester(self )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase__ = backbone_class(snake_case__ )
backbone.to(snake_case__ )
backbone.eval()
lowerCAmelCase__ = backbone(**snake_case__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase__ = backbone(**snake_case__ , output_hidden_states=snake_case__ )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase__ = backbone(**snake_case__ , output_attentions=snake_case__ )
self.assertIsNotNone(outputs.attentions )
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
while b:
lowerCAmelCase__ , lowerCAmelCase__ = b, a % b
return a
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b )
def _UpperCAmelCase ( ):
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
UpperCamelCase_ : Union[str, Any] = OPTConfig
UpperCamelCase_ : Union[str, Any] = {}
UpperCamelCase_ : str = "gelu"
def __init__( self : Tuple , snake_case__ : Tuple , snake_case__ : Dict=13 , snake_case__ : Tuple=7 , snake_case__ : List[str]=True , snake_case__ : int=False , snake_case__ : str=99 , snake_case__ : List[str]=16 , snake_case__ : str=2 , snake_case__ : Optional[Any]=4 , snake_case__ : List[Any]=4 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[str]=20 , snake_case__ : List[Any]=2 , snake_case__ : int=1 , snake_case__ : List[str]=0 , snake_case__ : Dict=16 , snake_case__ : str=16 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = word_embed_proj_dim
lowerCAmelCase__ = False
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase__ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case__ , **self.config_updates , )
lowerCAmelCase__ = prepare_opt_inputs_dict(snake_case__ , snake_case__ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Any ):
lowerCAmelCase__ = TFOPTModel(config=snake_case__ )
lowerCAmelCase__ = inputs_dict["""input_ids"""]
lowerCAmelCase__ = input_ids[:1, :]
lowerCAmelCase__ = inputs_dict["""attention_mask"""][:1, :]
lowerCAmelCase__ = 1
# first forward pass
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
lowerCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ )[0]
lowerCAmelCase__ = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : List[str] = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : List[Any] = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Dict = 10
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = TFOPTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(snake_case__ : Tuple , snake_case__ : Tuple ):
if hasattr(snake_case__ , """weight""" ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet,
                # and then retry getting the attribute once built.
model.build()
if hasattr(snake_case__ , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCAmelCase__ = model_class(config=snake_case__ )
lowerCAmelCase__ = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
lowerCAmelCase__ = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case__ )
lowerCAmelCase__ = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
lowerCAmelCase__ = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase__ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case__ )
# check that weights remain the same after resizing
lowerCAmelCase__ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase__ = False
self.assertTrue(snake_case__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case__ )
lowerCAmelCase__ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase__ = False
self.assertTrue(snake_case__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return tf.constant(lowerCamelCase__ , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] = 99
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase__ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase__ = input_ids.shape[0]
lowerCAmelCase__ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCAmelCase__ = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCAmelCase__ = tf.not_equal(snake_case__ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase__ = model(input_ids=snake_case__ , attention_mask=snake_case__ ).last_hidden_state
lowerCAmelCase__ = (1, 11, 512)
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase__ = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4E-3 ) )
lowerCAmelCase__ = tf.function(snake_case__ , jit_compile=snake_case__ )
lowerCAmelCase__ = xla_generate(snake_case__ , snake_case__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4E-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setUp()
lowerCAmelCase__ = """facebook/opt-350m"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase__ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" , padding=snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase__ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase__ = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-4 ) )
lowerCAmelCase__ = tf.function(snake_case__ , jit_compile=snake_case__ )
lowerCAmelCase__ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = """facebook/opt-125m"""
lowerCAmelCase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCAmelCase__ = []
lowerCAmelCase__ = GPTaTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase__ = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(snake_case__ , max_length=10 )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = """facebook/opt-350m"""
lowerCAmelCase__ = GPTaTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase__ = TFOPTForCausalLM.from_pretrained(snake_case__ )
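        # decoder-only models need left padding for batched generation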
lowerCAmelCase__ = """left"""
# use different length sentences to test batching
lowerCAmelCase__ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" , padding=snake_case__ )
lowerCAmelCase__ = inputs["""input_ids"""]
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , attention_mask=inputs["""attention_mask"""] )
lowerCAmelCase__ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ )
lowerCAmelCase__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
lowerCAmelCase__ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """facebook/opt-350m"""
lowerCAmelCase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCAmelCase__ = []
lowerCAmelCase__ = GPTaTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase__ = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(snake_case__ , max_length=10 )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
| 674 | """simple docstring"""
import os
def _UpperCAmelCase ( ):
"""simple docstring"""
    lowerCAmelCase__ = os.path.dirname(os.path.realpath(__file__ ) )
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" )
with open(lowerCamelCase__ ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = []
for line in triangle:
lowerCAmelCase__ = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowerCamelCase__ ) )
a.append(lowerCamelCase__ )
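    # dynamic programming: fold each row into the next, keeping the best path sum reaching every cell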
for i in range(1 , len(lowerCamelCase__ ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : List[str] = StableUnCLIPImgaImgPipeline
UpperCamelCase_ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
UpperCamelCase_ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase_ : Tuple = frozenset([] )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = 32
lowerCAmelCase__ = embedder_hidden_size
# image encoding components
lowerCAmelCase__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=snake_case__ , projection_dim=snake_case__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase__ = StableUnCLIPImageNormalizer(embedding_dim=snake_case__ )
lowerCAmelCase__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case__ , layers_per_block=1 , upcast_attention=snake_case__ , use_linear_projection=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase__ = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=snake_case__ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL()
lowerCAmelCase__ = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any]=0 , snake_case__ : Any=True ):
if str(snake_case__ ).startswith("""mps""" ):
lowerCAmelCase__ = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if pil_image:
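            # map the tensor from [-1, 1] to [0, 1] before converting to a PIL image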
lowerCAmelCase__ = input_image * 0.5 + 0.5
lowerCAmelCase__ = input_image.clamp(0 , 1 )
lowerCAmelCase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ = DiffusionPipeline.numpy_to_pil(snake_case__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline(**snake_case__ )
lowerCAmelCase__ = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = self.get_dummy_inputs(snake_case__ )
inputs.update({"""image_embeds""": None} )
lowerCAmelCase__ = sd_pipe(**snake_case__ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=snake_case__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=snake_case__ )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
lowerCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ = pipe(snake_case__ , """anime turle""" , generator=snake_case__ , output_type="""np""" )
lowerCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
lowerCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ = pipe(snake_case__ , """anime turle""" , generator=snake_case__ , output_type="""np""" )
lowerCAmelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = pipe(
snake_case__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__lowerCAmelCase : Optional[int] = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ):
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ):
        # note: this test does not measure peak performance, since it only evaluates a small batch,
        # but it should be enough to detect a regression in output quality
lowerCAmelCase__ = F"""facebook/wmt19-{pair}"""
lowerCAmelCase__ = self.get_tokenizer(snake_case__ )
lowerCAmelCase__ = self.get_model(snake_case__ )
lowerCAmelCase__ = bleu_data[pair]["""src"""]
lowerCAmelCase__ = bleu_data[pair]["""tgt"""]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ )
lowerCAmelCase__ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase__ = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a_ ( __UpperCamelCase ):
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(snake_case__ , """num_attention_heads""" ) )
class a_ :
def __init__( self : str , snake_case__ : Dict , snake_case__ : int=13 , snake_case__ : List[str]=64 , snake_case__ : List[Any]=3 , snake_case__ : Tuple=3 , snake_case__ : Any=2 , snake_case__ : Optional[Any]=1 , snake_case__ : Optional[int]=16 , snake_case__ : Optional[int]=[128, 256, 384] , snake_case__ : str=[4, 6, 8] , snake_case__ : Union[str, Any]=[2, 3, 4] , snake_case__ : str=[16, 16, 16] , snake_case__ : Dict=0 , snake_case__ : List[Any]=[2, 2, 2] , snake_case__ : List[Any]=[2, 2, 2] , snake_case__ : List[str]=0.02 , snake_case__ : List[str]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = kernel_size
lowerCAmelCase__ = stride
lowerCAmelCase__ = padding
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = depths
lowerCAmelCase__ = key_dim
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = attention_ratio
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = initializer_range
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : str ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[int] ):
lowerCAmelCase__ = LevitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ )
lowerCAmelCase__ = (self.image_size, self.image_size)
lowerCAmelCase__ , lowerCAmelCase__ = image_size[0], image_size[1]
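        # the LeViT conv stem applies 4 stride-2 convolutions, so the output-size formula is applied 4 times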
for _ in range(4 ):
lowerCAmelCase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowerCAmelCase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Any ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = LevitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : str = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = LevitModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : int ):
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
def check_hidden_states_output(snake_case__ : str , snake_case__ : int , snake_case__ : Any ):
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(snake_case__ ) , snake_case__ )
lowerCAmelCase__ = (self.model_tester.image_size, self.model_tester.image_size)
lowerCAmelCase__ , lowerCAmelCase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowerCAmelCase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowerCAmelCase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self : str ):
pass
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
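# the distillation ("WithTeacher") variant is inference-only and accepts no labels, so they are dropped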
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = model(**snake_case__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase__ = False
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowerCAmelCase__ = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase__ = model(**snake_case__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowerCAmelCase__ = problem_type["""title"""]
lowerCAmelCase__ = problem_type["""num_labels"""]
lowerCAmelCase__ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if problem_type["num_labels"] > 1:
lowerCAmelCase__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowerCAmelCase__ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case__ ) as warning_list:
lowerCAmelCase__ = model(**snake_case__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = LevitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
snake_case__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
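# A minimal standalone inference sketch mirroring the integration test above. The checkpoint
# id is whatever LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to (e.g. "facebook/levit-128S");
# treat the concrete id as an assumption:
# processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
# model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     predicted_class = model(**inputs).logits.argmax(-1).item()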
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
lowerCAmelCase__ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCAmelCase__ = DDPMScheduler()
lowerCAmelCase__ = AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(42 )
lowerCAmelCase__ = pipe(generator=snake_case__ , steps=4 )
lowerCAmelCase__ = output.audios[0]
lowerCAmelCase__ = output.images[0]
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(42 )
lowerCAmelCase__ = pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
lowerCAmelCase__ = output[0][0]
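# with return_dict=False the call returns a plain tuple, so output[0][0] is the first generated
# spectrogram image; the assert below checks that each spectrogram frame beyond the first
# corresponds to hop_length audio samples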
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCAmelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCAmelCase__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
lowerCAmelCase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCAmelCase__ = DDIMScheduler()
lowerCAmelCase__ = self.dummy_vqvae_and_unet
lowerCAmelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
lowerCAmelCase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(42 )
lowerCAmelCase__ = pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
lowerCAmelCase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCAmelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCAmelCase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase__ = self.dummy_unet_condition
lowerCAmelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
lowerCAmelCase__ = torch.rand((1, 1, 10) )
lowerCAmelCase__ = pipe(generator=snake_case__ , encoding=snake_case__ )
lowerCAmelCase__ = output.images[0]
lowerCAmelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCAmelCase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = torch_device
lowerCAmelCase__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(42 )
lowerCAmelCase__ = pipe(generator=snake_case__ )
lowerCAmelCase__ = output.audios[0]
lowerCAmelCase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCAmelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCAmelCase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
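# A minimal end-user sketch of the same pipeline, using the checkpoint id from the slow test above:
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to(torch_device)
# out = pipe(generator=torch.Generator(device=torch_device).manual_seed(42))
# out.images[0].save("mel.png")  # generated mel spectrogram
# waveform = out.audios[0]       # decoded audio, shape (1, num_samples) per the assert above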
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
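# The registration pattern exercised above, in one place:
# AutoConfig.register("custom", CustomConfig)
# AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# ...after which AutoImageProcessor.from_pretrained(...) resolves the custom class; the
# try/finally blocks undo registration via the private _extra_content dicts, which is
# appropriate only in tests.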
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.dummy_uncond_unet
lowerCAmelCase__ = ScoreSdeVeScheduler()
lowerCAmelCase__ = ScoreSdeVePipeline(unet=snake_case__ , scheduler=snake_case__ )
sde_ve.to(snake_case__ )
sde_ve.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case__ ).images
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case__ , return_dict=snake_case__ )[
0
]
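# return_dict=False makes the pipeline return a plain tuple; index 0 is the generated image batch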
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """google/ncsnpp-church-256"""
lowerCAmelCase__ = UNetaDModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = ScoreSdeVeScheduler.from_pretrained(snake_case__ )
lowerCAmelCase__ = ScoreSdeVePipeline(unet=snake_case__ , scheduler=snake_case__ )
sde_ve.to(snake_case__ )
sde_ve.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=snake_case__ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ):
lowerCAmelCase__ = np.random.default_rng(snake_case__ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
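# synthetic linear data: y = a * x + b + Gaussian noise (scale 0.1), consumed by the regression models below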
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : List[str] , snake_case__ : Optional[int] ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ )
lowerCAmelCase__ = datasets["""train"""].unique("""label""" )
lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )}
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
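# Usage sketch (hypothetical): collate_fn above closes over a module-level `accelerator`,
# so one must exist before the dataloaders are iterated:
# from accelerate import Accelerator
# accelerator = Accelerator()
# train_dl, eval_dl = _UpperCAmelCase(accelerator)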
| 674 | 1 |
"""simple docstring"""
import os
def _UpperCAmelCase ( lowerCamelCase__ = "matrix.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
lowerCAmelCase__ = in_file.read()
lowerCAmelCase__ = [[int(lowerCamelCase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
lowerCAmelCase__ = [[0 for cell in row] for row in grid]
lowerCAmelCase__ = len(grid[0] )
lowerCAmelCase__ = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
lowerCAmelCase__ = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
lowerCAmelCase__ = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
lowerCAmelCase__ = grid[i][0] + dp[i - 1][0]
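# interior cells: the cheapest way to reach (i, j) is from above or from the left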
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
lowerCAmelCase__ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
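# Example invocation (the script name and all paths are placeholders):
# python convert_mobilebert_tf_checkpoint.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert.bin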
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
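# Worked example: a 10 mH inductor with a 100 uF capacitor resonates at
# 1 / (2 * pi * sqrt(0.01 * 1e-4)) ~= 159.15 Hz, returned as ('Resonant frequency', 159.154...).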
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(lowerCamelCase__ )
else:
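# Sylvester recurrence: s(n) = s(n-1)**2 - s(n-1) + 1 = (s(n-1) - 1) * s(n-1) + 1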
lowerCAmelCase__ = sylvester(number - 1 )
lowerCAmelCase__ = num - 1
lowerCAmelCase__ = num
return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Any = KandinskyVaaControlnetImgaImgPipeline
UpperCamelCase_ : Dict = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCamelCase_ : int = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCamelCase_ : Optional[Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCamelCase_ : str = False
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase__ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase__ = UNetaDConditionModel(**snake_case__ )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowerCAmelCase__ = DDIMScheduler(**snake_case__ )
lowerCAmelCase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Any , snake_case__ : Dict=0 ):
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("""mps""" ):
lowerCAmelCase__ = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = """cpu"""
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**snake_case__ )
lowerCAmelCase__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCAmelCase__ = init_image.resize((512, 512) )
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
lowerCAmelCase__ = torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
lowerCAmelCase__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCAmelCase__ = """A robot, 4k photo"""
lowerCAmelCase__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCAmelCase__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
snake_case__ , image=snake_case__ , strength=0.85 , generator=snake_case__ , negative_prompt="""""" , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
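# End-to-end pattern: the prior pipeline turns the prompt into image_embeds, and the
# controlnet img2img decoder denoises from init_image under the depth hint tensor
# (scaled to [0, 1], shape (1, 3, H, W)).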
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
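# Pegasus adds `offset` (103 here) to every sentencepiece id, keeping the low ids reserved
# for <pad>, </s> and the <mask_*>/<unk_*> sentinel tokens checked above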
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = emb.weight.shape
lowerCAmelCase__ = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
lowerCAmelCase__ = emb.weight.data
return lin_layer
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__="facebook/mbart-large-en-ro" , lowerCamelCase__=False , lowerCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(lowerCamelCase__ )
lowerCAmelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowerCAmelCase__ = MBartConfig.from_pretrained(lowerCamelCase__ , vocab_size=lowerCamelCase__ )
if mbart_aa and finetuned:
lowerCAmelCase__ = """relu"""
lowerCAmelCase__ = state_dict["""decoder.embed_tokens.weight"""]
lowerCAmelCase__ = MBartForConditionalGeneration(lowerCamelCase__ )
model.model.load_state_dict(lowerCamelCase__ )
if finetuned:
lowerCAmelCase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
__lowerCAmelCase : Any = parser.parse_args()
__lowerCAmelCase : int = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | """simple docstring"""
from math import pi, sqrt
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
    if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(lowerCamelCase__ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _UpperCAmelCase ( ):
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCAmelCase : Dict = 1.0
while num:
__lowerCAmelCase : Any = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 1 |
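# A minimal cross-check sketch for the recurrence above: gamma(num) follows
# gamma(num) = (num - 1) * gamma(num - 1) with base cases gamma(1) = 1 and
# gamma(0.5) = sqrt(pi), so it should agree with the standard library's
# math.gamma on every supported input.
from math import gamma as std_gamma, isclose, pi, sqrt

def half_integer_gamma(num: float) -> float:
    # Same recurrence as above, restricted to positive integers/half-integers.
    if num == 0.5:
        return sqrt(pi)
    if num == 1:
        return 1.0
    return (num - 1) * half_integer_gamma(num - 1)

for n in (0.5, 1, 1.5, 2, 3.5, 10):
    assert isclose(half_integer_gamma(n), std_gamma(n))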
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
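# Illustrative invocation sketch, assuming the script is saved as
# convert_mobilebert_original_tf_checkpoint_to_pytorch.py; the checkpoint and
# config paths below are placeholder assumptions, not files shipped with it:
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin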
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
lowerCAmelCase__ = TFDeiTModel(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ):
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ):
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Any = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(snake_case__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ):
lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase__ = model(**snake_case__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 674 | 1 |
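# A worked-numbers sketch of the sequence-length arithmetic used by the tester
# above (image_size=30, patch_size=2 are its defaults): DeiT prepends two extra
# tokens, [CLS] and the distillation token, to the patch sequence.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 2                   # + [CLS] + distillation token
assert (num_patches, seq_length) == (225, 227)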
"""simple docstring"""
import math
import sys
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = """"""
try:
with open(lowerCamelCase__ , """rb""" ) as binary_file:
lowerCAmelCase__ = binary_file.read()
for dat in data:
lowerCAmelCase__ = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {"""0""": """0""", """1""": """1"""}
lowerCAmelCase__ , lowerCAmelCase__ = """""", """"""
lowerCAmelCase__ = len(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCAmelCase__ = lexicon[curr_string]
result += last_match_id
lowerCAmelCase__ = last_match_id + """0"""
if math.loga(lowerCamelCase__ ).is_integer():
lowerCAmelCase__ = {}
for curr_key in list(lowerCamelCase__ ):
lowerCAmelCase__ = lexicon.pop(lowerCamelCase__ )
lowerCAmelCase__ = new_lex
lowerCAmelCase__ = last_match_id + """1"""
index += 1
lowerCAmelCase__ = """"""
return result
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = 8
try:
with open(lowerCamelCase__ , """wb""" ) as opened_file:
lowerCAmelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowerCamelCase__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowerCAmelCase__ = data_bits[counter:]
lowerCAmelCase__ = data_bits[counter + 1 :]
return data_bits
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = read_file_binary(lowerCamelCase__ )
lowerCAmelCase__ = remove_prefix(lowerCamelCase__ )
lowerCAmelCase__ = decompress_data(lowerCamelCase__ )
write_file_binary(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
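# A round-trip sketch of the byte-padding scheme used by write_file_binary()
# above: the last byte is filled with a single "1" marker followed by zeros,
# so the marker is always the final "1" in the stream and can be stripped
# unambiguously.
def _pad_to_bytes(bits: str, width: int = 8) -> str:
    bits += "1"  # marker bit
    return bits + "0" * (-len(bits) % width)

def _strip_padding(bits: str) -> str:
    return bits[: bits.rindex("1")]  # drop the marker and its trailing zeros

assert _strip_padding(_pad_to_bytes("10110")) == "10110"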
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
return (pow(lowerCamelCase__ , 2 ) + step) % modulus
for _ in range(lowerCamelCase__ ):
# These track the position within the cycle detection logic.
lowerCAmelCase__ = seed
lowerCAmelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCAmelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
__lowerCAmelCase : List[str] = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
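# A compressed sketch of the same tortoise-and-hare search above, run on the
# classic textbook semiprime 8051 = 83 * 97 so the control flow is easy to trace.
from math import gcd

def tiny_rho(num, seed=2, step=1):
    def f(x):
        return (x * x + step) % num
    tortoise = hare = seed
    while True:
        tortoise = f(tortoise)
        hare = f(f(hare))  # the hare moves two steps per iteration
        divisor = gcd(hare - tortoise, num)
        if divisor == num:
            return None  # cycle exhausted without a factor; retry with a new seed
        if divisor > 1:
            return divisor

assert tiny_rho(8051) in (83, 97)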
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(lowerCamelCase__ )
else:
lowerCAmelCase__ = sylvester(number - 1 )
lowerCAmelCase__ = num - 1
lowerCAmelCase__ = num
return lower * upper + 1
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCAmelCase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = True
# hparam_utils.py hparams
lowerCAmelCase__ = 0.66_46_94
lowerCAmelCase__ = 0.20_79_51
lowerCAmelCase__ = 0.12_11_94
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 0.0_35_25_13
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase__ = 4
lowerCAmelCase__ = False
# hparam_utils.py hparams
lowerCAmelCase__ = 36.45_19
lowerCAmelCase__ = 0.90_34_21
lowerCAmelCase__ = 2_22.0_88
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 0.76_31_41
lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 674 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lowerCAmelCase__ = 6
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1901
lowerCAmelCase__ = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowerCAmelCase__ = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
lowerCAmelCase__ = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
lowerCAmelCase__ = day - days_per_month[month - 2]
if month > 12:
year += 1
lowerCAmelCase__ = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
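# A standard-library cross-check sketch for the calendar walk above: count the
# months whose first day is a Sunday between 1 Jan 1901 and 31 Dec 2000.
from datetime import date

def sundays_on_the_first() -> int:
    return sum(
        date(year, month, 1).weekday() == 6  # weekday(): Monday == 0, Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )

assert sundays_on_the_first() == 171  # matches the hand-rolled walk above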
| 674 | """simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
"""simple docstring"""
lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
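# Per colour, the table above counts tilings of a length-n row that use unit
# squares plus at least one block of a fixed length k. A sketch of the same
# count via a linear recurrence, checked against the worked example from the
# Project Euler 116 statement (a row of five units admits 7 + 3 + 2 = 12 ways):
from functools import lru_cache

@lru_cache(maxsize=None)
def tilings(n: int, k: int) -> int:
    if n < k:
        return 1  # only unit squares fit
    return tilings(n - 1, k) + tilings(n - k, k)  # end with a square or a k-block

def colour_ways(length: int) -> int:
    return sum(tilings(length, k) - 1 for k in (2, 3, 4))  # exclude all-squares

assert colour_ways(5) == 12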
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = "openai-gpt"
UpperCamelCase_ : str = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Dict , snake_case__ : Any=40478 , snake_case__ : List[str]=512 , snake_case__ : Any=768 , snake_case__ : List[Any]=12 , snake_case__ : List[Any]=12 , snake_case__ : Dict="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : Optional[int]=1E-5 , snake_case__ : Optional[int]=0.02 , snake_case__ : Tuple="cls_index" , snake_case__ : Union[str, Any]=True , snake_case__ : Dict=None , snake_case__ : int=True , snake_case__ : Optional[int]=0.1 , **snake_case__ : Tuple , ):
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = n_positions
lowerCAmelCase__ = n_embd
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
lowerCAmelCase__ = afn
lowerCAmelCase__ = resid_pdrop
lowerCAmelCase__ = embd_pdrop
lowerCAmelCase__ = attn_pdrop
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = summary_type
lowerCAmelCase__ = summary_use_proj
lowerCAmelCase__ = summary_activation
lowerCAmelCase__ = summary_first_dropout
lowerCAmelCase__ = summary_proj_to_labels
super().__init__(**snake_case__ )
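# A usage sketch of the attribute_map defined above, assuming the class is
# exported as OpenAIGPTConfig (as in transformers): aliases such as hidden_size
# transparently resolve to the underlying n_embd value.
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12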
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
lowerCAmelCase__ = nlp.model.BERTModel(
lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , )
original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
lowerCAmelCase__ = original_bort._collect_params_with_prefix()
    # Build our config 🤗
lowerCAmelCase__ = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = hf_param.shape
lowerCAmelCase__ = to_torch(params[gluon_param] )
lowerCAmelCase__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ = layer.attention.self
lowerCAmelCase__ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCAmelCase__ = layer.attention.output
lowerCAmelCase__ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCAmelCase__ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCAmelCase__ = layer.intermediate
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCAmelCase__ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCAmelCase__ = layer.output
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCAmelCase__ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
    # Save space and energy
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]
# Get gluon output
lowerCAmelCase__ = mx.nd.array([input_ids] )
lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase__ )
lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
hf_bort_model.eval()
lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]
lowerCAmelCase__ = output_gluon[0].asnumpy()
lowerCAmelCase__ = output_hf[0].detach().numpy()
lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
if success:
print("""โ๏ธ Both model do output the same tensors""" )
else:
print("""โ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 674 | 1 |
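# The check_and_map_params() pattern above reduces to "copy a tensor across
# frameworks only when the shapes agree". A minimal torch-only sketch of that
# guard, with a toy NumPy array standing in for a Gluon parameter:
import numpy as np
import torch
from torch import nn

def copy_with_shape_check(target: nn.Parameter, source_array: np.ndarray) -> nn.Parameter:
    source = torch.from_numpy(source_array).float()
    assert tuple(target.shape) == tuple(source.shape), (
        f"shape mismatch: source {tuple(source.shape)} vs target {tuple(target.shape)}"
    )
    return nn.Parameter(source)

weight = nn.Parameter(torch.zeros(4, 3))
weight = copy_with_shape_check(weight, np.ones((4, 3), dtype=np.float32))
assert torch.all(weight == 1)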
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class a_ ( __UpperCamelCase ):
def __init__( self : str ):
lowerCAmelCase__ = []
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , **snake_case__ : Any ):
self.events.append("""on_init_end""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[Any] , **snake_case__ : str ):
self.events.append("""on_train_begin""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : str , **snake_case__ : Any ):
self.events.append("""on_train_end""" )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : str , snake_case__ : List[str] , snake_case__ : str , **snake_case__ : Dict ):
self.events.append("""on_epoch_begin""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , **snake_case__ : str ):
self.events.append("""on_epoch_end""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , **snake_case__ : str ):
self.events.append("""on_step_begin""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict , **snake_case__ : int ):
self.events.append("""on_step_end""" )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] , **snake_case__ : Any ):
self.events.append("""on_evaluate""" )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : int , **snake_case__ : Optional[int] ):
self.events.append("""on_predict""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Union[str, Any] , **snake_case__ : List[str] ):
self.events.append("""on_save""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : int , **snake_case__ : List[Any] ):
self.events.append("""on_log""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , **snake_case__ : List[Any] ):
self.events.append("""on_prediction_step""" )
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = tempfile.mkdtemp()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
shutil.rmtree(self.output_dir )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Dict=0 , snake_case__ : List[str]=0 , snake_case__ : Any=64 , snake_case__ : Tuple=64 , snake_case__ : str=None , snake_case__ : int=False , **snake_case__ : List[str] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
lowerCAmelCase__ = RegressionDataset(length=snake_case__ )
lowerCAmelCase__ = RegressionDataset(length=snake_case__ )
lowerCAmelCase__ = RegressionModelConfig(a=snake_case__ , b=snake_case__ )
lowerCAmelCase__ = RegressionPreTrainedModel(snake_case__ )
lowerCAmelCase__ = TrainingArguments(self.output_dir , disable_tqdm=snake_case__ , report_to=[] , **snake_case__ )
return Trainer(
snake_case__ , snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , callbacks=snake_case__ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
# Order doesn't matter
lowerCAmelCase__ = sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ )
lowerCAmelCase__ = sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ )
        for cba, cbb in zip(snake_case__ , snake_case__ ):
            if isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba , cbb )
            elif isinstance(cba , type ) and not isinstance(cbb , type ):
                self.assertEqual(cba , cbb.__class__ )
            elif not isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba.__class__ , cbb )
            else:
                self.assertEqual(cba , cbb )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : List[str] ):
lowerCAmelCase__ = ["""on_init_end""", """on_train_begin"""]
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(trainer.get_eval_dataloader() )
lowerCAmelCase__ = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(snake_case__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.get_trainer()
lowerCAmelCase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# Callbacks passed at init are added to the default callbacks
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowerCAmelCase__ = self.get_trainer(disable_tqdm=snake_case__ )
lowerCAmelCase__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCAmelCase__ = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case__ )
expected_callbacks.remove(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
lowerCAmelCase__ = self.get_trainer()
lowerCAmelCase__ = trainer.pop_callback(snake_case__ )
self.assertEqual(cb.__class__ , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
trainer.add_callback(snake_case__ )
expected_callbacks.insert(0 , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# We can also add, pop, or remove by instance
lowerCAmelCase__ = self.get_trainer()
lowerCAmelCase__ = trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case__ )
expected_callbacks.remove(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
lowerCAmelCase__ = self.get_trainer()
lowerCAmelCase__ = trainer.callback_handler.callbacks[0]
lowerCAmelCase__ = trainer.pop_callback(snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
trainer.add_callback(snake_case__ )
expected_callbacks.insert(0 , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Any ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="""ignore""" , category=UserWarning )
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# Independent log/save/eval
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCAmelCase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# A bit of everything
lowerCAmelCase__ = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowerCAmelCase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
lowerCAmelCase__ = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
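# A sketch of how user code typically hooks into the same event stream the
# recorder above exercises: subclass TrainerCallback and override only the
# hooks of interest; extra arguments arrive through **kwargs, keeping the
# signature forward-compatible. (Uses the public transformers API.)
from transformers import TrainerCallback

class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss = {logs['loss']:.4f}")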
| 674 | """simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase__ = int(last % last )
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674 | 1 |
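# The ConstantStretch loop above is histogram equalization: each grey level is
# mapped through the scaled cumulative distribution (L - 1) * CDF. A vectorized
# NumPy sketch of the same transform on a synthetic low-contrast image:
import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size
    lut = np.round((levels - 1) * cdf).astype(np.uint8)  # per-level lookup table
    return lut[img]

rng = np.random.default_rng(0)
image = rng.integers(100, 156, size=(8, 8), dtype=np.uint8)  # values squeezed into [100, 155]
stretched = equalize(image)
assert stretched.max() - stretched.min() >= image.max() - image.min()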
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a_ ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModel.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForPreTraining.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForPreTraining.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(snake_case__ , from_tf=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelWithLMHead.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForMaskedLM.from_pretrained(snake_case__ , from_tf=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = AutoModelForMaskedLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(snake_case__ , from_tf=snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(
snake_case__ , output_loading_info=snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForSequenceClassification.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase__ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = TFAutoModelForQuestionAnswering.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase__ = AutoModelForQuestionAnswering.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
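        # likely loads the tiny checkpoint behind SMALL_MODEL_IDENTIFIER (a tiny
        # BERT model), which is why the expected parameter count is only 14410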
lowerCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 14410 )
lowerCAmelCase__ = AutoModelWithLMHead.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 14410 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
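        # likely the same round trip via DUMMY_UNKNOWN_IDENTIFIER, a checkpoint
        # whose model type must be inferred from its config (a tiny RoBERTa model)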
lowerCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(snake_case__ , from_pt=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 14410 )
lowerCAmelCase__ = AutoModelWithLMHead.from_pretrained(snake_case__ , from_tf=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) , 14410 )
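# A minimal sketch of the cross-framework round trip these tests exercise.
# "hf-internal-testing/tiny-random-bert" is an assumed tiny checkpoint, not an
# identifier taken from the tests above; both frameworks must be installed.
if is_tf_available() and is_torch_available():

    def _cross_framework_roundtrip_sketch(model_name="hf-internal-testing/tiny-random-bert"):
        tf_model = TFAutoModel.from_pretrained(model_name, from_pt=True)  # PyTorch weights -> TF model
        pt_model = AutoModel.from_pretrained(model_name)  # native PyTorch load for comparison
        return tf_model.num_parameters(), pt_model.num_parameters()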
| 674 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
UpperCamelCase_ : Any = "image_segmenter"
UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation
UpperCamelCase_ : List[str] = ["image", "text"]
UpperCamelCase_ : int = ["image"]
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
with torch.no_grad():
lowerCAmelCase__ = self.model(**snake_case__ ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
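        # the two bare assignments below stand for thresholding the logits into a
        # binary mask (negative logits -> 0, positive logits -> 1 in the
        # unobfuscated original) before scaling to 8-bit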
lowerCAmelCase__ = outputs.cpu().detach().numpy()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
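# A minimal usage sketch of the tool above. It assumes the PipelineTool base
# class chains encode -> forward -> decode when the instance is called and that
# those methods keep their original names (the listing above obfuscates them);
# the image path is a hypothetical placeholder.
if __name__ == "__main__":
    segmenter = a_()
    mask = segmenter(image=Image.open("photo.jpg"), label="a cat")
    mask.save("cat_mask.png")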
| 674 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = 1
lowerCAmelCase__ = 3
lowerCAmelCase__ = (32, 32)
lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(snake_case__ )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
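        # a stand-in feature extractor: `extract` returns an object exposing an
        # empty `pixel_values` tensor (the bare assignment below stands for
        # `self.pixel_values = torch.ones([0])` in the unobfuscated original)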
def extract(*snake_case__ : List[Any] , **snake_case__ : List[str] ):
class a_ :
def __init__( self : Any ):
lowerCAmelCase__ = torch.ones([0] )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Union[str, Any] ):
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.dummy_cond_unet
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCAmelCase__ = 77
lowerCAmelCase__ = self.dummy_image.to(snake_case__ )
lowerCAmelCase__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCAmelCase__ = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = """A painting of a squirrel eating a burger"""
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=snake_case__ , )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=snake_case__ , return_dict=snake_case__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.dummy_cond_unet
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCAmelCase__ = 77
lowerCAmelCase__ = self.dummy_image.to(snake_case__ )
# put models in fp16
lowerCAmelCase__ = unet.half()
lowerCAmelCase__ = vae.half()
lowerCAmelCase__ = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCAmelCase__ = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = """A painting of a squirrel eating a burger"""
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=snake_case__ , num_inference_steps=2 , output_type="""np""" , image=snake_case__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase__ = init_image.resize((760, 504) )
lowerCAmelCase__ = """BAAI/AltDiffusion"""
lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase__ = """A fantasy landscape, trending on artstation"""
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="""np""" , )
lowerCAmelCase__ = output.images[0]
lowerCAmelCase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCAmelCase__ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase__ = init_image.resize((768, 512) )
lowerCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowerCAmelCase__ = """BAAI/AltDiffusion"""
lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase__ = """A fantasy landscape, trending on artstation"""
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="""np""" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare the maximum absolute error here
assert np.abs(expected_image - image ).max() < 1E-2
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str = logging.get_logger(__name__)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
lowerCAmelCase__ = DetaConfig(
backbone_config=lowerCamelCase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowerCamelCase__ , with_box_refine=lowerCamelCase__ , two_stage=lowerCamelCase__ , )
# set labels
lowerCAmelCase__ = """huggingface/label-files"""
if "o365" in model_name:
lowerCAmelCase__ = 366
lowerCAmelCase__ = """object365-id2label.json"""
else:
lowerCAmelCase__ = 91
lowerCAmelCase__ = """coco-detection-id2label.json"""
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = json.load(open(cached_download(hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) )
lowerCAmelCase__ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = dct.pop(lowerCamelCase__ )
lowerCAmelCase__ = val
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCAmelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[:dim, :]
lowerCAmelCase__ = in_proj_bias[: dim]
lowerCAmelCase__ = in_proj_weight[
dim : dim * 2, :
]
lowerCAmelCase__ = in_proj_bias[
dim : dim * 2
]
lowerCAmelCase__ = in_proj_weight[
-dim :, :
]
lowerCAmelCase__ = in_proj_bias[-dim :]
# fmt: on
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase__ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCAmelCase__ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[:hidden_size, :]
lowerCAmelCase__ = in_proj_bias[:hidden_size]
lowerCAmelCase__ = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowerCAmelCase__ = in_proj_weight[-hidden_size:, :]
lowerCAmelCase__ = in_proj_bias[-hidden_size:]
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = get_deta_config(lowerCamelCase__ )
# load original state dict
if model_name == "deta-swin-large":
lowerCAmelCase__ = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase__ = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
lowerCAmelCase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
    # log the name and shape of every entry in the original state dict
for name, param in state_dict.items():
print(lowerCamelCase__ , param.shape )
# rename keys
lowerCAmelCase__ = create_rename_keys(lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_swin_q_k_v(lowerCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCAmelCase__ = state_dict.pop(lowerCamelCase__ )
lowerCAmelCase__ = val
if "input_proj" in key:
lowerCAmelCase__ = state_dict.pop(lowerCamelCase__ )
lowerCAmelCase__ = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCAmelCase__ = state_dict.pop(lowerCamelCase__ )
lowerCAmelCase__ = val
# finally, create HuggingFace model and load state dict
lowerCAmelCase__ = DetaForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
lowerCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(lowerCamelCase__ )
# load image processor
lowerCAmelCase__ = DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(images=lowerCamelCase__ , return_tensors="""pt""" )
lowerCAmelCase__ = encoding["""pixel_values"""]
lowerCAmelCase__ = model(pixel_values.to(lowerCamelCase__ ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowerCAmelCase__ = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
lowerCAmelCase__ = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase__ = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
lowerCAmelCase__ = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCamelCase__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCamelCase__ ) , atol=1e-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the ๐ค hub."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
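# Example invocation (the script filename and output path are hypothetical):
#   python convert_deta_swin_checkpoint.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub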
| 674 | """simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
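# A self-contained sketch of the exchange the class implements, computed
# directly from the group-14 (2048-bit MODP) parameters. `primes` follows the
# name the class body above uses for the parameter table.
def _dh_exchange_sketch():
    from secrets import randbelow

    p, g = primes[14]["prime"], primes[14]["generator"]
    a, b = randbelow(p - 3) + 2, randbelow(p - 3) + 2  # private keys in [2, p - 2]
    public_a, public_b = pow(g, a, p), pow(g, b, p)  # values exchanged in the clear
    assert pow(public_b, a, p) == pow(public_a, b, p)  # both sides derive the same secret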
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
lowerCAmelCase__ = word_bank or []
# create a table
lowerCAmelCase__ = len(lowerCamelCase__ ) + 1
lowerCAmelCase__ = []
for _ in range(lowerCamelCase__ ):
table.append([] )
    # seed value: the empty prefix has exactly one decomposition, the empty list
    lowerCAmelCase__ = [[]]
# iterate through the indices
for i in range(lowerCamelCase__ ):
        # only positions that are already reachable can be extended
if table[i] != []:
for word in word_bank:
                # the candidate word must match the target substring starting at i
if target[i : i + len(lowerCamelCase__ )] == word:
lowerCAmelCase__ = [
[word, *way] for way in table[i]
]
                    # prepend the word to every decomposition at the current position,
                    # then push the extended decompositions to table[i + len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
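    # a target that cannot be built from the word bank has no decompositions
    print(all_construct("xyz", ["a", "b"]))  # prints []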
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    # intermediate weights
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
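# Example invocation (all paths are hypothetical):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin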
| 674 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = args.pruning_method
lowerCAmelCase__ = args.threshold
lowerCAmelCase__ = args.model_name_or_path.rstrip("""/""" )
lowerCAmelCase__ = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
lowerCAmelCase__ = torch.load(os.path.join(lowerCamelCase__ , """pytorch_model.bin""" ) )
lowerCAmelCase__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase__ = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase__ = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
lowerCAmelCase__ = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowerCAmelCase__ = MagnitudeBinarizer.apply(inputs=lowerCamelCase__ , threshold=lowerCamelCase__ )
lowerCAmelCase__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[f"""{prefix_}mask_scores"""]
lowerCAmelCase__ = TopKBinarizer.apply(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[f"""{prefix_}mask_scores"""]
lowerCAmelCase__ = ThresholdBinarizer.apply(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[f"""{prefix_}mask_scores"""]
lowerCAmelCase__ , lowerCAmelCase__ = -0.1, 1.1
lowerCAmelCase__ = torch.sigmoid(lowerCamelCase__ )
lowerCAmelCase__ = s * (r - l) + l
lowerCAmelCase__ = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
lowerCAmelCase__ = os.path.join(
os.path.dirname(lowerCamelCase__ ) , f"""bertarized_{os.path.basename(lowerCamelCase__ )}""" )
if not os.path.isdir(lowerCamelCase__ ):
shutil.copytree(lowerCamelCase__ , lowerCamelCase__ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowerCamelCase__ , os.path.join(lowerCamelCase__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
__lowerCAmelCase : int = parser.parse_args()
main(args)
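# Example invocation (the script filename and model folder are hypothetical):
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model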
| 674 | """simple docstring"""
import os
from math import logaa
def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
lowerCAmelCase__ = x * logaa(lowerCamelCase__ )
lowerCAmelCase__ = i + 1
return result
if __name__ == "__main__":
print(solution())
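# Worked example of the logarithm trick used above: instead of evaluating the
# powers, compare exp * log10(base). For 2**10 vs 3**7:
#   10 * log10(2) ~= 3.0103  and  7 * log10(3) ~= 3.3399,
# so 3**7 (= 2187) exceeds 2**10 (= 1024).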
| 674 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class a_ ( __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Union[str, Any] = "resnet"
UpperCamelCase_ : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : List[Any] , snake_case__ : int=3 , snake_case__ : Optional[Any]=64 , snake_case__ : Union[str, Any]=[256, 512, 1024, 2048] , snake_case__ : Tuple=[3, 4, 6, 3] , snake_case__ : int="bottleneck" , snake_case__ : Any="relu" , snake_case__ : Union[str, Any]=False , snake_case__ : int=None , snake_case__ : List[str]=None , **snake_case__ : Tuple , ):
super().__init__(**snake_case__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embedding_size
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = depths
lowerCAmelCase__ = layer_type
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = downsample_in_first_stage
lowerCAmelCase__ = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(snake_case__ ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Dict = version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
return 1E-3
| 674 | """simple docstring"""
def euclidean_gcd(a , b ):
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive(a , b ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
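
# A minimal sketch of the extended variant (added for illustration, not part of
# the original file): besides gcd(a, b) it returns Bezout coefficients x, y
# with a * x + b * y == gcd(a, b), via the same Euclidean recursion.
def extended_euclidean_gcd(a , b ):
    if b == 0:
        return a, 1, 0
    g , x , y = extended_euclidean_gcd(b , a % b )
    return g, y, x - (a // b) * y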
def main():
    """simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.dummy_uncond_unet
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=snake_case__ , scheduler=snake_case__ )
pndm.to(snake_case__ )
pndm.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=snake_case__ , num_inference_steps=20 , output_type="""numpy""" ).images
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=snake_case__ , num_inference_steps=20 , output_type="""numpy""" , return_dict=snake_case__ )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = """google/ddpm-cifar10-32"""
        lowerCAmelCase__ = UNet2DModel.from_pretrained(snake_case__ )
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=snake_case__ , scheduler=snake_case__ )
pndm.to(snake_case__ )
pndm.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=snake_case__ , output_type="""numpy""" ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
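
        # Minimal end-to-end usage sketch mirroring this test (illustration
        # only; downloads the "google/ddpm-cifar10-32" checkpoint used above):
        #
        #   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
        #   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
        #   image = pipe(num_inference_steps=50, output_type="pil").images[0]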
| 674 | """simple docstring"""
import os
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
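
# Worked mini-example of the same top-down DP on a three-row triangle
# (toy data, independent of triangle.txt; `_demo_max_path_sum` is an
# illustrative helper, not part of the original solution): every cell
# accumulates the best path reaching it, so the answer is the max of the
# last row.
def _demo_max_path_sum():
    a = [[3], [7, 4], [2, 4, 6]]
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )  # 14, via the path 3 -> 7 -> 4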
if __name__ == "__main__":
print(solution())
| 674 | 1 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowerCAmelCase : Union[str, Any] = pytest.mark.integration
__lowerCAmelCase : List[str] = {"comet"}
__lowerCAmelCase : List[Any] = importlib.util.find_spec("fairseq") is not None
__lowerCAmelCase : Dict = {"code_eval"}
__lowerCAmelCase : List[str] = os.name == "nt"
__lowerCAmelCase : Tuple = {"bertscore", "frugalscore", "perplexity"}
__lowerCAmelCase : List[Any] = importlib.util.find_spec("transformers") is not None
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
@wraps(lowerCamelCase__ )
def wrapper(self , lowerCamelCase__ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , lowerCamelCase__ )
return wrapper
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
@wraps(lowerCamelCase__ )
def wrapper(self , lowerCamelCase__ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , lowerCamelCase__ )
return wrapper
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
@wraps(lowerCamelCase__ )
def wrapper(self , lowerCamelCase__ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , lowerCamelCase__ )
return wrapper
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@local
class a_ ( parameterized.TestCase ):
UpperCamelCase_ : Union[str, Any] = {}
UpperCamelCase_ : Dict = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = """[...]"""
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , snake_case__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(snake_case__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : int ):
lowerCAmelCase__ = """[...]"""
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , snake_case__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : int ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case__ ):
yield
else:
yield
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
def load_local_metric(snake_case__ : Dict , *snake_case__ : List[str] , **snake_case__ : int ):
return load_metric(os.path.join("""metrics""" , snake_case__ ) , *snake_case__ , **snake_case__ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , snake_case__ : int ):
def wrapper(snake_case__ : str ):
lowerCAmelCase__ = contextmanager(snake_case__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class a_ ( __UpperCamelCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Any ):
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
import torch
def bert_cos_score_idf(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCamelCase__ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
def load_from_checkpoint(lowerCamelCase__ ):
class a_ :
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[str] ):
assert len(snake_case__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(snake_case__ ) / len(snake_case__ )
return Model()
    # mock download_model which is supposed to download a comet checkpoint
    # mock load_from_checkpoint which is supposed to load the comet metric model
with patch("""comet.download_model""" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    """simple docstring"""
    metric = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    wrong_scheme = """ERROR"""
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__lowerCAmelCase : Optional[int] = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ):
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowerCAmelCase__ = F"""facebook/wmt19-{pair}"""
lowerCAmelCase__ = self.get_tokenizer(snake_case__ )
lowerCAmelCase__ = self.get_model(snake_case__ )
lowerCAmelCase__ = bleu_data[pair]["""src"""]
lowerCAmelCase__ = bleu_data[pair]["""tgt"""]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ )
lowerCAmelCase__ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCAmelCase__ = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
| 674 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
__lowerCAmelCase : Any = F"https://www.google.com/search?q={query}&num=100"
__lowerCAmelCase : Optional[int] = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
__lowerCAmelCase : Union[str, Any] = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
__lowerCAmelCase : Dict = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def quote_of_the_day():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 674 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Union[str, Any] = ["image_processor", "tokenizer"]
UpperCamelCase_ : List[Any] = "CLIPImageProcessor"
UpperCamelCase_ : Union[str, Any] = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : List[str] , snake_case__ : str=None , snake_case__ : Any=None , **snake_case__ : List[Any] ):
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case__ , )
lowerCAmelCase__ = kwargs.pop("""feature_extractor""" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case__ , snake_case__ )
def __call__( self : List[str] , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , **snake_case__ : Optional[int] ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowerCAmelCase__ = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowerCAmelCase__ = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowerCAmelCase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict , *snake_case__ : int , **snake_case__ : Optional[Any] ):
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *snake_case__ : Dict , **snake_case__ : List[str] ):
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 674 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 1 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Optional[Any] = "xlm-prophetnet"
UpperCamelCase_ : Optional[int] = ["past_key_values"]
UpperCamelCase_ : Tuple = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : List[Any] , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[Union[str, Callable]] = "gelu" , snake_case__ : Optional[int] = 30522 , snake_case__ : Optional[int] = 1024 , snake_case__ : Optional[int] = 4096 , snake_case__ : Optional[int] = 12 , snake_case__ : Optional[int] = 16 , snake_case__ : Optional[int] = 4096 , snake_case__ : Optional[int] = 12 , snake_case__ : Optional[int] = 16 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[int] = 512 , snake_case__ : Optional[float] = 0.02 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 2 , snake_case__ : Optional[int] = 32 , snake_case__ : Optional[int] = 128 , snake_case__ : Optional[bool] = False , snake_case__ : Optional[float] = 0.0 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[int] = 2 , **snake_case__ : Tuple , ):
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = encoder_ffn_dim
lowerCAmelCase__ = num_encoder_layers
lowerCAmelCase__ = num_encoder_attention_heads
lowerCAmelCase__ = decoder_ffn_dim
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = num_decoder_attention_heads
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = init_std # Normal(0, this parameter)
lowerCAmelCase__ = activation_function
# parameters for xlmprophetnet
lowerCAmelCase__ = ngram
lowerCAmelCase__ = num_buckets
lowerCAmelCase__ = relative_max_distance
lowerCAmelCase__ = disable_ngram_loss
lowerCAmelCase__ = eps
# 3 Types of Dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = dropout
lowerCAmelCase__ = use_cache
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Optional[int] ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 674 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
    def __init__(self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self : Optional[Any] ):
return self.length
    def __getitem__(self , i ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
    def __init__(self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True

    def forward(self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
    def __init__(self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True

    def forward(self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator , batch_size=16 ):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""" , data_files=data_files )
    label_list = datasets["""train"""].unique("""label""" )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )

    return train_dataloader, eval_dataloader
| 674 | 1 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"""Task {task} not supported.""" )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 674 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
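
# To load the exported weights back (sketch; reuses the same config file and
# placeholder paths, since only the state dict is saved above):
#
#   config = MobileBertConfig.from_json_file("/path/to/config.json")
#   model = MobileBertForPreTraining(config)
#   model.load_state_dict(torch.load("/path/to/pytorch_model.bin"))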
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
def compute_ap(l ):  # noqa: E741
    """simple docstring"""
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count

    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
__lowerCAmelCase : str = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
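
# For the sample graph above, removing vertex 2, 3 or 5 disconnects part of
# the graph, so compute_ap should print the articulation points 2, 3 and 5.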
| 674 | """simple docstring"""
def sylvester(number ):
    """simple docstring"""
    assert isinstance(number , int ), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
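
# The recurrence above is s(n) = (s(n-1) - 1) * s(n-1) + 1, giving the terms
# 2, 3, 7, 43, 1807, 3263443, ...; each term is one more than the product of
# all previous terms, e.g. 2 * 3 * 7 * 43 + 1 = 1807.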
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 674 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _UpperCAmelCase ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowerCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
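
# In short, patch_submodule walks a dotted attribute path such as
# "os.path.join" inside the target module's globals, wraps the intermediate
# modules in _PatchedModuleObj so dotted access keeps working, swaps the leaf
# for the mock, and restores everything on exit. A minimal usage sketch
# (`my_mock` is a placeholder object):
#
#   with patch_submodule(_test_patching, "os.path.join", my_mock):
#       assert _test_patching.os.path.join is my_mock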
def _UpperCAmelCase ( ):
"""simple docstring"""
assert _test_patching.open is open
lowerCAmelCase__ = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , lowerCamelCase__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , lowerCamelCase__ ):
pass
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , lowerCamelCase__ ) is None
with patch_submodule(_test_patching , """len""" , lowerCamelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__"""
lowerCAmelCase__ = patch_submodule(_test_patching , """open""" , lowerCamelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _UpperCAmelCase ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowerCAmelCase__ = """__test_patch_submodule_successive_join__"""
lowerCAmelCase__ = """__test_patch_submodule_successive_dirname__"""
lowerCAmelCase__ = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase__ ):
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase__ ):
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , lowerCamelCase__ ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , lowerCamelCase__ ):
pass
| 674 | """simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
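
    # Why `unk_token_id == offset + 2` holds above: the underlying
    # SentencePiece model stores <unk> at piece id 2, and every SentencePiece
    # id is shifted up by `offset` (103) to leave room for the pad/eos/mask
    # tokens and the reserved <unk_2>..<unk_102> slots.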
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 674 | 1 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ):
    """print under an exclusive file lock so that concurrent ranks do not interleave their output"""
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
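# Illustrative usage of ``printflock`` (a sketch, not part of the original
# diagnostics): without the exclusive lock above, several ranks printing at
# once can interleave characters mid-line. Any file every process can open
# works as the lock target; this script simply reuses its own source file:
#
#     printflock(f"[{socket.gethostname()}] finished warmup")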
__lowerCAmelCase : Optional[Any] = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__lowerCAmelCase : Optional[Any] = torch.device("cuda", local_rank)
__lowerCAmelCase : List[str] = socket.gethostname()
__lowerCAmelCase : Dict = F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
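# If a rank prints "... is broken" above, the usual suspects (an assumption
# drawn from common NCCL failure modes, not an exhaustive list) are a
# mismatched MASTER_ADDR/MASTER_PORT, a firewall between the nodes, or an
# incompatible NCCL/CUDA pairing; rerunning with NCCL_DEBUG=INFO as described
# in the header narrows the cause down.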
| 674 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
    def setUp( self ):
if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
assert hasattr(self , """env""" )
    def create_estimator( self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        smp_options = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        mpi_options = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , instance_count : List[str] ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 674 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__)
__lowerCAmelCase : Dict = ["names", "prefix"]
__lowerCAmelCase : Optional[int] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__lowerCAmelCase : Optional[int] = ["encoding_errors", "on_bad_lines"]
__lowerCAmelCase : Optional[Any] = ["date_format"]
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    """BuilderConfig for CSV."""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__( self ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
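    # Illustrative effect of the filtering above (a sketch; the exact keys kept
    # depend on the installed pandas version): with pandas >= 2.0, deprecated
    # keys such as ``mangle_dupe_cols`` and the pre-2.0 ``error_bad_lines`` /
    # ``warn_bad_lines`` pair are removed whenever they still hold their
    # CsvConfig defaults, so e.g. ``CsvConfig(sep=";").pd_read_csv_kwargs``
    # yields a dict that can be splatted straight into ``pd.read_csv``.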
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
    def _cast_table( self , pa_table : pa.Table ):
if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
raise
| 674 | """simple docstring"""
from math import pi, sqrt
def gamma( num ):
    """compute the gamma function for integer or half-integer inputs via the recurrence gamma(n) = (n - 1) * gamma(n - 1)"""
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
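# Worked example of the recursion above (illustrative, not from the original
# module): gamma(3.5) unfolds as 2.5 * gamma(2.5) = 2.5 * 1.5 * gamma(1.5)
# = 2.5 * 1.5 * 0.5 * gamma(0.5) = 1.875 * sqrt(pi) ~= 3.3234.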
def test_gamma():
    """simple sanity checks for the base cases"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig( PretrainedConfig ):
UpperCamelCase_ : Optional[int] = "pegasus"
UpperCamelCase_ : Optional[int] = ["past_key_values"]
UpperCamelCase_ : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
    def num_attention_heads( self ):
return self.encoder_attention_heads
@property
    def hidden_size( self ):
return self.d_model
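# A minimal usage sketch (illustrative; the values below are just overrides of
# the defaults in ``__init__`` above, not a released checkpoint configuration):
#
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     assert config.hidden_size == 512  # alias resolved via the property above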
| 674 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _SCREAMING_SNAKE_CASE ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def _SCREAMING_SNAKE_CASE ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """load the standard COCO test fixture image"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 674 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__lowerCAmelCase : Tuple = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho( num , seed = 2 , step = 1 , attempts = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value , step , modulus ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
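# Illustrative usage (hedged: the algorithm is probabilistic, so a factor is
# expected but not guaranteed within the default three attempts):
#
#     >>> pollard_rho(8051)   # 8051 = 83 * 97; the classic textbook input
#     97
#     >>> pollard_rho(17) is None   # primes have no nontrivial factor
#     True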
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : List[Any] = os.path.join(git_repo_path, "src", "transformers")
__lowerCAmelCase : List[str] = "\n{0} = None\n"
__lowerCAmelCase : Optional[int] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__lowerCAmelCase : Optional[Any] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(snake_case__ )
lowerCAmelCase__ = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(snake_case__ , """tokenizers""" )
lowerCAmelCase__ = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(snake_case__ , """tensorflow_text""" )
lowerCAmelCase__ = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(snake_case__ , """sentencepiece_and_tokenizers""" )
lowerCAmelCase__ = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(snake_case__ , """sentencepiece_and_tensorflow_text""" )
lowerCAmelCase__ = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(snake_case__ , """sentencepiece_and_tokenizers_and_vision""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , snake_case__ )
self.assertIn("""tensorflow_text""" , snake_case__ )
self.assertIn("""sentencepiece_and_tokenizers""" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(snake_case__ , """\nCONSTANT = None\n""" )
lowerCAmelCase__ = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
snake_case__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
lowerCAmelCase__ = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
lowerCAmelCase__ = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(snake_case__ , snake_case__ )
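        # For context (a sketch of how such a generated dummy behaves at
        # runtime, assuming the DummyObject metaclass from transformers.utils):
        # calling ``FakeClass()`` without torch installed raises an ImportError
        # whose message asks the user to install the missing backend.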
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
lowerCAmelCase__ = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , snake_case__ )
| 674 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """convert a TAPAS TF checkpoint to a PyTorch model for the given task"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"""Task {task} not supported.""" )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 674 | 1 |
"""simple docstring"""
def matching_min_vertex_cover( graph ):
"""simple docstring"""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
return chosen_vertices
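# Worked example (illustrative): for graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4],
# 3: [0, 1, 2], 4: [2, 3]}, popping the edge (0, 1) first adds {0, 1} and
# discards every edge touching 0 or 1; the surviving edges all lie among
# {2, 3, 4}, so one or two more pops finish a cover such as {0, 1, 2, 4}.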
def get_edges( graph ):
    """collect every (from_node, to_node) pair of the adjacency-list graph"""
    edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 674 | """simple docstring"""
def solution( length = 50 ):
    """count the ways to tile a row with red (2), green (3) or blue (4) tiles, one colour at a time"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
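# Sanity check of the recurrence (stated here as an assumption, consistent
# with the worked example of Project Euler problem 116 rather than taken from
# the original module): a row of length 5 admits 7 red (length-2), 3 green
# (length-3) and 2 blue (length-4) tilings, so solution(5) == 12.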
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp( self ):
super().setUp()
# fmt: off
lowerCAmelCase__ = ["""ใใ""", """ใใใซ""", """ใซใกใฏ""", """ใฐใใฏ""", """ไธ็,ใบ็""", """ใ""", """ใ""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase__ = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐
lowerCAmelCase__ = {"""unk_token""": """<unk>"""}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、世界。 こんばんは、㔺界。"""
        expected_tokens = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_tokens )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
def _SCREAMING_SNAKE_CASE ( self : int ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        tokens = tokenizer.encode(input_text )
        output = tokenizer.decode(tokens )
        self.assertEqual(output , expected_text )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase__ = """ใใใซใกใฏใไธ็ใ"""
lowerCAmelCase__ = """ใใใฐใใฏใใบ็ใ๐"""
lowerCAmelCase__ = """ใใใซใกใฏใไธ็ใใใใฐใใฏใไธ็ใ๐"""
lowerCAmelCase__ = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase__ = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase__ = tokenizer.encode(snake_case__ , prefix_text=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(snake_case__ )
lowerCAmelCase__ = tokenizer.decode(snake_case__ )
lowerCAmelCase__ = tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase__ = """ใใใซใกใฏใไธ็ใ"""
lowerCAmelCase__ = """ใใใฐใใฏใใบ็ใ๐"""
lowerCAmelCase__ = len(tokenizer.encode(snake_case__ ) ) - 2
lowerCAmelCase__ = len(tokenizer.encode(snake_case__ ) ) - 2
lowerCAmelCase__ = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase__ = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase__ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase__ = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase__ = tokenizer(snake_case__ , prefix_text=snake_case__ ).token_type_ids
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token = tokenizer.encode("""あンいワ""" )
        x_token_a = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        x_token_b = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_a ) )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_b ) )
        self.assertNotEqual(x_token , x_token_a )
        self.assertNotEqual(x_token , x_token_b )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_b[3] )  # SEG token
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs , padding=True )
# fmt: off
        input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_a.input_ids , input_ids_expected )
        self.assertListEqual(x_token_a.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_a.attention_mask , attention_mask_expected )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
# tokenizer has no padding token
pass
| 674 | """simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config ๐ค
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
        # self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
        # intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
        # output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
    # Save space and energy 🎄
hf_bort_model.half()
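    # .half() casts all floating-point parameters to fp16, roughly halving the
    # size of the checkpoint saved below.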
    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    # SAMPLE_TEXT (the example sentence) and pytorch_dump_folder_path (the
    # output-directory argument) are defined earlier in this script
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
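    # np.allclose(a, b, atol=1e-3) tests |a - b| <= atol + rtol * |b| elementwise
    # (rtol defaults to 1e-5), so `success` tolerates the fp16 rounding
    # introduced by .half() above.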
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
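# Example invocation (script name and paths are illustrative):
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch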