"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case = 0
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case = tuple[int, int]
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Node | None , ):
lowerCamelCase__ = pos_x
lowerCamelCase__ = pos_y
lowerCamelCase__ = (pos_y, pos_x)
lowerCamelCase__ = goal_x
lowerCamelCase__ = goal_y
lowerCamelCase__ = g_cost
lowerCamelCase__ = parent
lowerCamelCase__ = self.calculate_heuristic()
lowerCamelCase__ = self.g_cost + self.h_cost
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.pos_x - self.goal_x
lowerCamelCase__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(SCREAMING_SNAKE_CASE__ ) + abs(SCREAMING_SNAKE_CASE__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : str , SCREAMING_SNAKE_CASE__ : Node ):
return self.f_cost < other.f_cost
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : TPosition , SCREAMING_SNAKE_CASE__ : TPosition ):
lowerCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [self.start]
lowerCamelCase__ = []
lowerCamelCase__ = False
def _UpperCamelCase ( self : Dict ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCamelCase__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(SCREAMING_SNAKE_CASE__ )
self.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_successors(SCREAMING_SNAKE_CASE__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
# retrieve the best current path
lowerCamelCase__ = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
return [self.start.pos]
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Node ):
lowerCamelCase__ = []
for action in delta:
lowerCamelCase__ = parent.pos_x + action[1]
lowerCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE__ , ) )
return successors
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Node | None ):
lowerCamelCase__ = node
lowerCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase__ = current_node.parent
path.reverse()
return path
class _a :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : TPosition , SCREAMING_SNAKE_CASE__ : TPosition ):
lowerCamelCase__ = AStar(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AStar(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
def _UpperCamelCase ( self : Dict ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCamelCase__ = self.fwd_astar.open_nodes.pop(0 )
lowerCamelCase__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = current_bwd_node
lowerCamelCase__ = current_fwd_node
lowerCamelCase__ = {
self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE__ ),
self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
# retrieve the best current path
lowerCamelCase__ = astar.open_nodes.pop(
astar.open_nodes.index(SCREAMING_SNAKE_CASE__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
return [self.fwd_astar.start.pos]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Node , SCREAMING_SNAKE_CASE__ : Node ):
lowerCamelCase__ = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE__ )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = AStar(init, goal)
_snake_case = a_star.search()
_snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_snake_case = time.time()
_snake_case = BidirectionalAStar(init, goal)
_snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
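
    # Added usage sketch (not in the original): all three implementations count
    # the same ordered combinations, so they must agree; for array [1, 2, 5] and
    # target 5 the recurrence gives count(5) = count(4) + count(3) + count(0) = 9.
    assert combination_sum_iv(n, array, target) == 9
    assert combination_sum_iv_dp_array(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9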
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
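

# Added sketch (an illustration written for this page, not the actual
# `_LazyModule` from `transformers.utils`): the idea behind the pattern above
# is that the package only records *where* each exported name lives, and the
# real submodule import happens on first attribute access.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # resolve the submodule lazily, on first access
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)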
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
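

# Added usage sketch (not in the original module): with weights [0, 0, 1],
# lower values win in the first two columns and higher values win in the
# third, so the first row collects the best total score.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]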
"""simple docstring"""
from __future__ import annotations
from typing import Any
def snake_case ( _a: list[Any] )-> None:
'''simple docstring'''
create_state_space_tree(_a , [] , 0 )
def snake_case ( _a: list[Any] , _a: list[Any] , _a: int )-> None:
'''simple docstring'''
if index == len(_a ):
print(_a )
return
create_state_space_tree(_a , _a , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_a , _a , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
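
    # Added note (not in the original): create_state_space_tree branches twice
    # per element (exclude, then include), so a length-n input prints 2**n
    # subsequences: 16 lines for [3, 1, 2, 4] and 8 for ["A", "B", "C"].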
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = ['transformers', 'torch', 'note_seq']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def _UpperCamelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : int ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_snake_case = "__DUMMY_TRANSFORMERS_USER__"
_snake_case = "Dummy User"
_snake_case = "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"
_snake_case = "https://hub-ci.huggingface.co"
_snake_case = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_snake_case = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_snake_case = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def snake_case ( _a: Union[str, Any] )-> Tuple:
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _a )
@pytest.fixture
def snake_case ( _a: List[Any] )-> Any:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _a )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _a )
@pytest.fixture
def snake_case ( _a: Optional[Any] )-> List[Any]:
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _a )
@pytest.fixture
def snake_case ( _a: List[Any] , _a: List[Any] )-> List[Any]:
'''simple docstring'''
HfFolder.save_token(_a )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def snake_case ( )-> List[Any]:
'''simple docstring'''
return HfApi(endpoint=_a )
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi )-> Dict:
'''simple docstring'''
lowerCamelCase__ = HfFolder.get_token()
HfFolder.save_token(_a )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_a )
@pytest.fixture
def snake_case ( _a: int )-> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_a: int ):
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def snake_case ( _a: Tuple )-> str:
'''simple docstring'''
@contextmanager
def _temporary_repo(_a: Any ):
try:
yield repo_id
finally:
cleanup_repo(_a )
return _temporary_repo
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Dict , _a: Optional[int] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = F'repo_txt_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data/text_data.txt' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: Dict , _a: int , _a: Any )-> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Dict , _a: Optional[Any] )-> str:
'''simple docstring'''
lowerCamelCase__ = F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data.zip' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: List[Any] , _a: Tuple , _a: Dict )-> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Optional[Any] , _a: Tuple )-> int:
'''simple docstring'''
lowerCamelCase__ = F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data.zip' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: Optional[Any] , _a: Optional[Any] , _a: List[str] )-> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
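

# Added usage sketch (hypothetical test, not part of the original fixtures):
# `temporary_repo` guarantees deletion of the repo even when the test body fails.
#
# def test_upload_round_trip(temporary_repo, hf_api, hf_token):
#     with temporary_repo(f"{CI_HUB_USER}/my-test-repo") as repo_id:
#         hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")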
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
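
    # Added check (not in the original): a valid result must start at `init`
    # and end at `goal`; the action map stores, per cell, the index into
    # DIRECTIONS of the move that first reached it.
    assert path[0] == init and path[-1] == goal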
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = UnCLIPImageVariationPipeline
a_ : List[str] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
a_ : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS
a_ : List[str] = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
a_ : Tuple = False
@property
def _UpperCamelCase ( self : Optional[Any] ):
return 32
@property
def _UpperCamelCase ( self : str ):
return 32
@property
def _UpperCamelCase ( self : str ):
return self.time_input_dim
@property
def _UpperCamelCase ( self : List[str] ):
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ):
return 1_00
@property
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _UpperCamelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : int ):
torch.manual_seed(0 )
lowerCamelCase__ = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCamelCase__ = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def _UpperCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCamelCase__ = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCamelCase__ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def _UpperCamelCase ( self : Any ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _UpperCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _UpperCamelCase ( self : List[Any] ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
lowerCamelCase__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.dummy_decoder
lowerCamelCase__ = self.dummy_text_proj
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_super_res_first
lowerCamelCase__ = self.dummy_super_res_last
lowerCamelCase__ = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
lowerCamelCase__ = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
lowerCamelCase__ = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCamelCase__ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=True ):
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
if pil_image:
lowerCamelCase__ = input_image * 0.5 + 0.5
lowerCamelCase__ = input_image.clamp(0 , 1 )
lowerCamelCase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase__ = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
**SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
**SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCamelCase__ = pipe(
**SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCamelCase__ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = torch.device('cpu' )
class _a :
a_ : Dict = 1
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe.decoder.dtype
lowerCamelCase__ = 1
lowerCamelCase__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCamelCase__ = pipe.prepare_latents(
SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , latents=SCREAMING_SNAKE_CASE__ , scheduler=DummyScheduler() )
lowerCamelCase__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCamelCase__ = pipe.prepare_latents(
SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , latents=SCREAMING_SNAKE_CASE__ , scheduler=DummyScheduler() )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
**SCREAMING_SNAKE_CASE__ , decoder_latents=SCREAMING_SNAKE_CASE__ , super_res_latents=SCREAMING_SNAKE_CASE__ ).images
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ )
# Don't pass image, instead pass embedding
lowerCamelCase__ = pipeline_inputs.pop('image' )
lowerCamelCase__ = pipe.image_encoder(SCREAMING_SNAKE_CASE__ ).image_embeds
lowerCamelCase__ = pipe(
**SCREAMING_SNAKE_CASE__ , decoder_latents=SCREAMING_SNAKE_CASE__ , super_res_latents=SCREAMING_SNAKE_CASE__ , image_embeddings=SCREAMING_SNAKE_CASE__ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCamelCase__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=SCREAMING_SNAKE_CASE__ )
@skip_mps
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = torch_device == 'cpu'
lowerCamelCase__ = True
lowerCamelCase__ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCamelCase__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=SCREAMING_SNAKE_CASE__ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ )
@skip_mps
def _UpperCamelCase ( self : List[str] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCamelCase ( self : Optional[int] ):
return super().test_save_load_local()
@skip_mps
def _UpperCamelCase ( self : List[Any] ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCamelCase__ = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCamelCase__ = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ = pipeline(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
lowerCamelCase__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 15 )
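

# Added usage note (hypothetical invocation): the fast tests above run on CPU
# with tiny dummy models, e.g.
#   pytest tests/pipelines/unclip/test_unclip_image_variation.py -k "input_tensor"
# while the integration test requires a CUDA GPU and downloads real weights.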
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
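
    # Added check (not in the original): euclidean_distance is a plain L2 norm,
    # e.g. the 3-4-5 right triangle.
    assert euclidean_distance([0, 0], [3, 4]) == 5.0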
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
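
# Added check (not in the original): a tiny hand-verifiable network. The
# augmenting paths 0->1->3 (bottleneck 2) and 0->2->3 (bottleneck 2) give a
# maximum flow of 4. Because augmenting paths are found with BFS, this is the
# Edmonds-Karp variant of Ford-Fulkerson.
small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(small_graph, 0, 3) == 4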
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_snake_case = logging.get_logger(__name__)
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = question_encoder
lowerCamelCase__ = generator
lowerCamelCase__ = self.question_encoder
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCamelCase ( cls : Dict , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase__ = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
if config is None:
lowerCamelCase__ = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
lowerCamelCase__ = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ):
return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.question_encoder
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.generator
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "longest" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE__ , )
if max_length is None:
lowerCamelCase__ = self.current_tokenizer.model_max_length
lowerCamelCase__ = self(
SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase__ = self.current_tokenizer.model_max_length
lowerCamelCase__ = self(
text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = labels['input_ids']
return model_inputs
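

# Added usage sketch (the checkpoint name is a real public RAG checkpoint, but
# treat the snippet as illustrative): __call__ encodes with the question-encoder
# tokenizer, while decode()/batch_decode() delegate to the generator tokenizer.
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")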
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
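
# Added usage note (hypothetical invocation; the flags mirror the
# HumanEvalArguments fields referenced above):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> \
#       --do_sample --temperature 0.2 --n_samples 200 --batch_size 10 \
#       --output_file eval_results.json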
"""simple docstring"""
from collections.abc import Sequence
def snake_case ( _a: Sequence[int] | None = None )-> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
lowerCamelCase__ = nums[0]
for i in range(1 , len(_a ) ):
lowerCamelCase__ = nums[i]
lowerCamelCase__ = max(_a , ans + num , _a )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_snake_case = int(input("Enter number of elements : ").strip())
_snake_case = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
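
    # Added checks (not in the original): the best subsequence keeps every
    # positive element (1 + 2 + 3 + 4 = 10); an all-negative input returns its
    # largest single element.
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
    assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1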
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()
    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )
if __name__ == "__main__":
main()
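# For reference (added; the field names are taken from the loop above, the
# rest of the record is illustrative), each entry in the raw DPR file looks
# like:
#
#   {
#       "question": "who sings does he love me with reba",
#       "positive_ctxs": [{"title": "Does He Love You", ...}, ...],
#       ...
#   }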
| 659 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x , mu: float = 0.0 , sigma: float = 1.0 ) -> float:
    '''Return the Gaussian probability density at x for the given mu and sigma.'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
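    # Added sanity check (not in the original file): the density should
    # integrate to ~1 over a wide interval for the default mu=0, sigma=1.
    import numpy as np
    xs = np.linspace(-10, 10, 10_001)
    print(np.trapz(gaussian(xs), xs))  # ~1.0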
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
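# How the lazy import works, as a minimal sketch (added; simplified and
# hypothetical — the real `_LazyModule` in `transformers.utils` handles more
# cases):
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name, import_structure):
#           self._name = name
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           # Import the submodule only when one of its names is first accessed.
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self._name)
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self._name} has no attribute {attr}")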
| 659 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    def __init__(self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding.' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self , num_training_steps: int ):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler(self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler(self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss(self , model , inputs ):
        labels = inputs.pop('labels' )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step(self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['max_length'] )
        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['max_length'] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                F' padded to `max_length`={max_length}' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
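def _label_smoothed_nll_loss_reference(lprobs, target, epsilon, ignore_index=-100):
    """Reference sketch (added) of the fairseq-style `label_smoothed_nll_loss`
    imported above from `utils`; the actual implementation there may differ
    slightly. `lprobs` are log-probabilities of shape (..., vocab_size)."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss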
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config( PretrainedConfig ):
    model_type = 'swinv2'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
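# Example (added; illustrative only): the derived channel dimension after the
# last stage follows from `embed_dim` and the number of stages.
#
#   config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # 768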
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str , text: str ) -> bool:
    '''simple docstring'''
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str ) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
_snake_case = "abc1abc12"
_snake_case = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_snake_case = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_snake_case = "ABABX"
_snake_case = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
_snake_case = "AAAB"
_snake_case = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
_snake_case = "abcdabcy"
_snake_case = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
_snake_case = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
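    # Added cross-check (not in the original file): the result should agree
    # with Python's built-in substring search on random inputs.
    import random
    for _ in range(100):
        rand_text = "".join(random.choices("ab", k=50))
        rand_pattern = "".join(random.choices("ab", k=4))
        assert kmp(rand_pattern, rand_text) == (rand_pattern in rand_text)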
| 712 |
"""simple docstring"""
def factorial(num: int ) -> int:
    '''simple docstring'''
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int ) -> int:
    '''simple docstring'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100 ) -> int:
    '''simple docstring'''
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
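    # Added cross-check (not in the original file): the digit sum of 100!
    # matches a straightforward string-based computation.
    from math import factorial as math_factorial
    assert solution(100) == sum(int(digit) for digit in str(math_factorial(100)))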
| 659 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    '''simple docstring'''
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    '''simple docstring'''
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    '''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    '''simple docstring'''
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    '''simple docstring'''
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    '''simple docstring'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str] ) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
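# Illustration (added; not part of the original module): a MinHash estimates
# the Jaccard similarity between token sets without comparing them directly.
#
#   tokens_a = ["def", "f", "x", "return", "y", "z", "a", "b", "c", "d"]
#   tokens_b = ["def", "f", "x", "return", "y", "z", "a", "b", "c", "e"]
#   estimate = get_min_hash(tokens_a).jaccard(get_min_hash(tokens_b))
#   # estimate ~= 9 / 11, the exact Jaccard similarity of the two sets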
def get_tokens(code: str ) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__(self , * , duplication_jaccard_threshold: float = 0.85 ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add(self , code_key: Tuple , min_hash: MinHash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters(self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save(self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element ):
    '''simple docstring'''
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset] ):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset] , jaccard_threshold: float ):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str , code2: str ) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold ):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold ):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset] , jaccard_threshold: float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
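# Typical usage sketch (added; illustrative): given a `datasets.Dataset` with
# "content", "repo_name" and "path" columns,
#
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#
# keeps one "extreme" file per duplicate cluster, and returns the clusters with
# `is_extreme` / `copies` annotations for inspection.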
| 659 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def setUp(self ):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data(self ):
        tokenizer = self.get_rust_tokenizer()
        input_str = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        expected_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_str )['input_ids']
        self.assertListEqual(expected_tokens , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , input_str )
    def test_padding(self , max_length=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_encodings_from_xnli_dataset(self ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise']  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists(self ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results ):
    '''simple docstring'''
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines ):
    '''simple docstring'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
    @property
    def time(self ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header(self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self ):
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self ):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
    @staticmethod
    def error_out():
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post(self ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks(self , job_name , job_link , failures , text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value ) > 250 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links():
    '''simple docstring'''
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
    return {}
def retrieve_artifact(name: str ):
    '''simple docstring'''
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    '''simple docstring'''
    class Artifact:
        def __init__(self , name: str ):
            self.name = name
            self.paths = []
        def __str__(self ):
            return self.name
        def add_path(self , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
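    # Example (added; illustrative) of the pytest stats line parsed above:
    #   handle_test_results("== 2 failed, 30 passed in 1:02:03 ==")
    #   -> (2, 30, "1:02:03")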
| 659 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical(self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_2 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_3 - output_4 ) ) > 1e-3
    def test_attention_slicing_forward_pass(self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical(self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self ):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
        # YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
            raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
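The guidance step in the pipeline above reduces to a small amount of tensor arithmetic. A minimal sketch with toy tensors (the shapes are made up; the real pipeline applies this to the prior's predicted image embeddings): the batch is doubled into [unconditional, conditional], the prediction is split back into halves, and the halves are blended by guidance_scale.

import torch

batch, dim = 2, 8
noise_pred = torch.randn(2 * batch, dim)  # model output on the doubled batch
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert guided.shape == (batch, dim)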
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = 'bird'
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : str ):
lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = 'Chef in the kitchen'
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 716 |
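The replicate/shard calls in the Flax tests above set up the standard pmap data layout. A minimal sketch with toy shapes, assuming a host with at least one JAX device: parameters are copied to every device, while the global batch gains a leading device axis.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {'w': jnp.ones((4,))}
batch = jnp.ones((n * 2, 4))    # global batch must be divisible by the device count
replicated = replicate(params)  # every leaf gains a leading axis of size n
sharded = shard(batch)          # shape (n, 2, 4): one slice per device
print(jax.tree_util.tree_map(lambda x: x.shape, replicated), sharded.shape)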
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 659 | 0 |
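In the tokenizer above, the non-legacy mode prepends the source language code and appends </s>. A sketch of the resulting layout with made-up token ids (the real ids come from the vocabulary):

cur_lang_code = 256047  # hypothetical id for "eng_Latn"
eos_token_id = 2
prefix_tokens = [cur_lang_code]
suffix_tokens = [eos_token_id]

def build_inputs(token_ids):
    # mirrors build_inputs_with_special_tokens for a single sequence
    return prefix_tokens + token_ids + suffix_tokens

print(build_inputs([9102, 334]))  # [256047, 9102, 334, 2]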
"""simple docstring"""
from PIL import Image
def snake_case ( img: Image , level: float )-> Image:
    '''simple docstring'''
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        _snake_case = snake_case(img, 100)
    _snake_case.save("image_data/lena_brightness.png", format="png")
| 717 |
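PIL's point() maps every 8-bit channel value through the supplied function. This pure-Python sketch applies the same formula as the brightness helper above with level=100; note that PIL clips the results back into [0, 255] for 8-bit images.

level = 100.0
pixels = [0, 64, 128, 255]
print([128 + level + (c - 128) for c in pixels])  # [100.0, 164.0, 228.0, 355.0]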
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
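The attention-mask setup in prepare_config_and_inputs above guarantees that every sample attends to at least one token. A minimal NumPy sketch of the same trick (sizes are illustrative): pick a random split point per row, attend to everything before it, and mask everything after.

import numpy as np

batch_size, seq_length = 4, 7
rng = np.random.default_rng(0)
start_index = rng.integers(1, seq_length - 1, size=(batch_size,))
mask = np.zeros((batch_size, seq_length), dtype=np.int64)
for i, s in enumerate(start_index):
    mask[i, :s] = 1  # visible prefix; the rest stays masked
print(mask)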
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class _a :
a_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a_ : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
a_ : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Whether to log verbose messages or not.'} , )
a_ : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
a_ : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
a_ : Optional[float] = field(
default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger( model_args: ModelArguments , training_args: TrainingArguments )-> None:
    '''simple docstring'''
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class _a :
a_ : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
a_ : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
a_ : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
a_ : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a_ : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
a_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
a_ : Optional[float] = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class _a :
a_ : WavaVecaForPreTraining
a_ : WavaVecaFeatureExtractor
a_ : Union[bool, str] = "longest"
a_ : Optional[int] = None
a_ : Optional[int] = None
def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# reformat list to dict and set to pytorch format
lowerCamelCase__ = self.feature_extractor.pad(
SCREAMING_SNAKE_CASE__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
lowerCamelCase__ = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
lowerCamelCase__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
            # these two operations make sure that all values
            # before the output length indices are attended to
lowerCamelCase__ = 1
lowerCamelCase__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCamelCase__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=SCREAMING_SNAKE_CASE__ , min_masks=2 , )
return batch
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , **SCREAMING_SNAKE_CASE__ : List[str] ):
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 0
lowerCamelCase__ = max_gumbel_temp
lowerCamelCase__ = min_gumbel_temp
lowerCamelCase__ = gumbel_temp_decay
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : nn.Module , SCREAMING_SNAKE_CASE__ : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
lowerCamelCase__ = self._prepare_inputs(SCREAMING_SNAKE_CASE__ )
if self.use_amp:
with autocast():
lowerCamelCase__ = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCamelCase__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCamelCase__ = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
lowerCamelCase__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(SCREAMING_SNAKE_CASE__ ).backward()
elif self.use_apex:
with amp.scale_loss(SCREAMING_SNAKE_CASE__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(SCREAMING_SNAKE_CASE__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def main( )-> None:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
        datasets['train'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
        datasets['train'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch['speech'] , batch['sampling_rate'] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm=\'layer\'' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 718 |
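The trainer above anneals the Gumbel-softmax temperature once per update step. A sketch of that schedule using the script's default hyperparameters: exponential decay from max_gumbel_temp with a floor at min_gumbel_temp.

max_gumbel_temp, min_gumbel_temp, decay = 2.0, 0.5, 0.999995

def gumbel_temperature(step: int) -> float:
    return max(max_gumbel_temp * decay**step, min_gumbel_temp)

print(gumbel_temperature(0))        # 2.0
print(gumbel_temperature(500_000))  # 0.5 (the decayed value ~0.16 is floored)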
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def _UpperCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.dummy_uncond_unet
lowerCamelCase__ = PNDMScheduler()
lowerCamelCase__ = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type='numpy' ).images
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type='numpy' , return_dict=SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = 'google/ddpm-cifar10-32'
lowerCamelCase__ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = PNDMScheduler()
lowerCamelCase__ = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 719 |
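The PNDM tests above rely on torch's global seeding for reproducibility: reseeding before each sampling call makes the generated noise, and hence the outputs, identical. A minimal sketch of the idiom:

import torch

torch.manual_seed(0)
a = torch.randn(2, 3)
torch.manual_seed(0)
b = torch.randn(2, 3)
assert torch.equal(a, b)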
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
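The Funnel tokenizer above gives [CLS] its own token type id (2). A sketch of the intended create_token_type_ids_from_sequences layout, with made-up sequence lengths: tokens of the first sequence plus its [SEP] get type 0, and the optional second sequence plus its [SEP] gets type 1.

from typing import List, Optional

cls_token_type_id = 2  # matches the class attribute above

def token_type_ids(len_a: int, len_b: Optional[int] = None) -> List[int]:
    ids = [cls_token_type_id] + [0] * (len_a + 1)  # sequence A + [SEP]
    if len_b is not None:
        ids += [1] * (len_b + 1)                   # sequence B + [SEP]
    return ids

print(token_type_ids(3))     # [2, 0, 0, 0, 0]
print(token_type_ids(3, 2))  # [2, 0, 0, 0, 0, 1, 1, 1]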
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=99 , SCREAMING_SNAKE_CASE__ : Any=36 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=10_00 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = coordinate_size
lowerCamelCase__ = shape_size
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__ = text_seq_length
lowerCamelCase__ = (image_size // patch_size) ** 2 + 1
lowerCamelCase__ = self.text_seq_length + self.image_seq_length
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ = bbox[i, j, 3]
lowerCamelCase__ = bbox[i, j, 1]
lowerCamelCase__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ = bbox[i, j, 2]
lowerCamelCase__ = bbox[i, j, 0]
lowerCamelCase__ = tmp_coordinate
lowerCamelCase__ = tf.constant(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE__ )
# text + image
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ = model({'pixel_values': pixel_values} , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
config.num_labels = self.num_labels
model = TFLayoutLMvaForTokenClassification(config=config )
result = model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
config.num_labels = 2
model = TFLayoutLMvaForQuestionAnswering(config=config )
result = model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : Union[str, Any] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
a_ : Optional[Any] = False
a_ : List[Any] = False
a_ : str = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
return True
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = copy.deepcopy(inputs_dict )
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
inputs_dict = {
k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(v , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
inputs_dict['labels'] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
inputs_dict['labels'] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def setUp( self ):
self.model_tester = TFLayoutLMvaModelTester(self )
self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_loss_computation( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
if getattr(model , 'hf_compute_loss' , None ):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
added_label = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
]
expected_loss_size = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
input_ids = prepared_for_class.pop('input_ids' )
loss = model(input_ids , **prepared_for_class )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
input_ids = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
labels = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
labels[0] = -100
prepared_for_class['labels'] = tf.convert_to_tensor(labels )
loss = model(input_ids , **prepared_for_class )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
loss = model(prepared_for_class )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call ).parameters
signature_names = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: 'input_ids'}
for label_key in label_keys:
label_key_index = signature_names.index(label_key )
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input )
# Send to model
loss = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def test_model( self ):
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def test_model_various_embeddings( self ):
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config.position_embedding_type = type
self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def test_for_sequence_classification( self ):
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
def test_for_token_classification( self ):
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
def test_for_question_answering( self ):
(config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def test_model_from_pretrained( self ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFLayoutLMvaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img()-> Dict:
'''simple docstring'''
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class _a ( unittest.TestCase ):
@cached_property
def default_image_processor( self ):
return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def test_inference_no_head( self ):
model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values
input_ids = tf.constant([[1, 2]] )
bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
# verify the logits
expected_shape = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
expected_slice = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
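# Editor's addition: the integration test above doubles as a usage recipe. In user code
# (assuming the obfuscated TFLayoutLMva* names map to transformers' TFLayoutLMv3* classes):
# from transformers import LayoutLMv3ImageProcessor, TFLayoutLMv3Model
# processor = LayoutLMv3ImageProcessor(apply_ocr=False)
# model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
# pixel_values = processor(images=image, return_tensors="tf").pixel_values
# outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)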
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]] )-> bool:
'''simple docstring'''
visited = [False] * len(graph )
color = [-1] * len(graph )
def dfs(v: int , c: int ):
visited[v] = True
color[v] = c
for u in graph[v]:
if not visited[u]:
dfs(u , 1 - c )
for i in range(len(graph ) ):
if not visited[i]:
dfs(i , 0 )
for i in range(len(graph ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
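# Editor's addition: a quick negative example. The 4-cycle above is bipartite
# (prints True), while any odd cycle is not:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False: a 3-cycle cannot be 2-colored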
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , )-> float:
'''simple docstring'''
x1 = x_start
fx1 = fnc(x_start )
area = 0.0
for _ in range(steps ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
x2 = (x_end - x_start) / steps + x1
fx2 = fnc(x2 )
area += abs(fx2 + fx1 ) * (x2 - x1) / 2
# Increment step
x1 = x2
fx1 = fx2
return area
if __name__ == "__main__":
def f(x: float )-> float:
'''simple docstring'''
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
i = 10
while i <= 10_0000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
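# Editor's note: because of the running abs() the loop above approximates the area
# between the curve and the x axis, i.e. the integral of |x^3 + x^2| over [-5, 5],
# which works out to 938/3 ≈ 312.67.
print(f"exact area: {938 / 3}")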
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
self._initial_block_size = initial_block_size
self._buckets = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
self._capacity_factor = capacity_factor
self._len = 0
def _get_bucket_index( self , key : KEY ):
return hash(key ) % len(self._buckets )
def _get_next_ind( self , ind : int ):
return (ind + 1) % len(self._buckets )
def _try_set( self , ind : int , key : KEY , val : VAL ):
stored = self._buckets[ind]
if not stored:
self._buckets[ind] = _Item(key , val )
self._len += 1
return True
elif stored.key == key:
self._buckets[ind] = _Item(key , val )
return True
else:
return False
def _is_full( self ):
limit = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(limit )
def _is_sparse( self ):
if len(self._buckets ) <= self._initial_block_size:
return False
limit = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _resize( self , new_size : int ):
old_buckets = self._buckets
self._buckets = [None] * new_size
self._len = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _size_up( self ):
self._resize(len(self._buckets ) * 2 )
def _size_down( self ):
self._resize(len(self._buckets ) // 2 )
def _iterate_buckets( self , key : KEY ):
ind = self._get_bucket_index(key )
for _ in range(len(self._buckets ) ):
yield ind
ind = self._get_next_ind(ind )
def _add_item( self , key : KEY , val : VAL ):
for ind in self._iterate_buckets(key ):
if self._try_set(ind , key , val ):
break
def __setitem__( self , key : KEY , val : VAL ):
if self._is_full():
self._size_up()
self._add_item(key , val )
def __delitem__( self , key : KEY ):
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
raise KeyError(key )
if item is _deleted:
continue
if item.key == key:
self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , key : KEY ):
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(key )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
val_string = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
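# Editor's addition: a small usage demo of the open-addressing HashMap above.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    for key in range(10):
        hm[str(key)] = key  # triggers several _size_up() resizes
    del hm["3"]
    assert len(hm) == 9 and hm["7"] == 7
    print(hm)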
| 659 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor ( FeatureExtractionMixin ):
def __init__( self , **kwargs ):
requires_backends(self , ['bs4'] )
super().__init__(**kwargs )
def xpath_soup( self , element ):
xpath_tags = []
xpath_subscripts = []
child = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
siblings = parent.find_all(child.name , recursive=False )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def get_three_from_single( self , html_string ):
html_code = BeautifulSoup(html_string , 'html.parser' )
all_doc_strings = []
string2xtag_seq = []
string2xsubs_seq = []
for element in html_code.descendants:
if type(element ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
text_in_this_tag = html.unescape(element ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(text_in_this_tag )
xpath_tags , xpath_subscripts = self.xpath_soup(element )
string2xtag_seq.append(xpath_tags )
string2xsubs_seq.append(xpath_subscripts )
if len(all_doc_strings ) != len(string2xtag_seq ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(all_doc_strings ) != len(string2xsubs_seq ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, string2xtag_seq, string2xsubs_seq
def construct_xpath( self , xpath_tags , xpath_subs ):
xpath = ''
for tagname, subs in zip(xpath_tags , xpath_subs ):
xpath += F'/{tagname}'
if subs != 0:
xpath += F'[{subs}]'
return xpath
def __call__( self , html_strings ):
valid_strings = False
# Check that strings has a valid type
if isinstance(html_strings , str ):
valid_strings = True
elif isinstance(html_strings , (list, tuple) ):
if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
valid_strings = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
F'but is of type {type(html_strings )}.' )
is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
if not is_batched:
html_strings = [html_strings]
# Get nodes + xpaths
nodes = []
xpaths = []
for html_string in html_strings:
all_doc_strings , string2xtag_seq , string2xsubs_seq = self.get_three_from_single(html_string )
nodes.append(all_doc_strings )
xpath_strings = []
for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
xpath_string = self.construct_xpath(tag_list , sub_list )
xpath_strings.append(xpath_string )
xpaths.append(xpath_strings )
# return as Dict
data = {'nodes': nodes, 'xpaths': xpaths}
encoded_inputs = BatchFeature(data=data , tensor_type=None )
return encoded_inputs
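# Editor's addition: an illustrative usage sketch. Inside the transformers package this
# class is exposed as MarkupLMFeatureExtractor; the snippet assumes that correspondence
# (the relative imports above mean this module is not directly runnable as a script).
# from transformers import MarkupLMFeatureExtractor
# extractor = MarkupLMFeatureExtractor()
# features = extractor("<html><body><p>Hello world</p></body></html>")
# features["nodes"]   -> [['Hello world']]
# features["xpaths"]  -> [['/html/body/p']]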
| 700 |
"""simple docstring"""
def combination_sum_iv(n: int , array: list[int] , target: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(target: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
target: int , dp_array: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
answer = sum(
count_of_possible_combinations_with_dp_array(target - item , dp_array )
for item in array )
dp_array[target] = answer
return answer
dp_array = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int )-> int:
'''simple docstring'''
dp_array = [0] * (target + 1)
dp_array[0] = 1
for i in range(1 , target + 1 ):
for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
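# Editor's note: all three implementations agree here; there are 9 ordered sequences
# from {1, 2, 5} summing to 5 (5; 1+2+2 in 3 orders; 1+1+1+2 in 4 orders; 1+1+1+1+1),
# so the line above prints 9.
assert combination_sum_iv_bottom_up(n, array, target) == 9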
| 659 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8 )-> str:
'''simple docstring'''
chars = ascii_letters + digits + punctuation
return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl: str , i: int )-> str:
'''simple docstring'''
i -= len(chars_incl )
quotient = i // 3
remainder = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
chars = (
chars_incl
+ random(ascii_letters , quotient + remainder )
+ random(digits , quotient )
+ random(punctuation , quotient )
)
list_of_chars = list(chars )
shuffle(list_of_chars )
return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str , quantity: int )-> str:
'''simple docstring'''
return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number(chars_incl , quantity ):
'''simple docstring'''
pass # Put your code here...
def random_letters(chars_incl , quantity ):
'''simple docstring'''
pass # Put your code here...
def random_characters(chars_incl , quantity ):
'''simple docstring'''
pass # Put your code here...
def is_strong_password(password: str , min_length: int = 8 )-> bool:
'''simple docstring'''
if len(password ) < min_length:
# Your Password must be at least 8 characters long
return False
upper = any(char in ascii_uppercase for char in password )
lower = any(char in ascii_lowercase for char in password )
num = any(char in digits for char in password )
spec_char = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main()-> None:
'''simple docstring'''
max_length = int(input('Please indicate the max length of your password: ' ).strip() )
chars_incl = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(max_length ) )
print(
'Alternative Password generated:' , alternative_password_generator(chars_incl , max_length ) , )
print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
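# Editor's addition: the strength checker can be exercised non-interactively, e.g.
# is_strong_password("Hwea7$2!") -> True, while is_strong_password("hunter2") -> False
# (shorter than 8 characters, no uppercase or special character).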
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
def sum_of_digits(n: int )-> int:
'''simple docstring'''
n = abs(n )
res = 0
while n > 0:
res += n % 10
n //= 10
return res
def sum_of_digits_recursion(n: int )-> int:
'''simple docstring'''
n = abs(n )
return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact(n: int )-> int:
'''simple docstring'''
return sum(int(c ) for c in str(abs(n ) ) )
def benchmark()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func: Callable , value: int ) -> None:
call = F'{func.__name__}({value})'
timing = timeit(F'__main__.{call}' , setup='import __main__' )
print(F'{call:56} = {func(value )} -- {timing:.4f} seconds' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(func , value )
print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 702 |
"""simple docstring"""
def get_data(source_data: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
data_lists = []
for data in source_data:
for i, el in enumerate(data ):
if len(data_lists ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(el ) )
return data_lists
def calculate_each_score(data_lists: list[list[float]] , weights: list[int] )-> list[list[float]]:
'''simple docstring'''
score_lists = []
for dlist, weight in zip(data_lists , weights ):
mind = min(dlist )
maxd = max(dlist )
score = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
msg = F'Invalid weight of {weight:f} provided'
raise ValueError(msg )
score_lists.append(score )
return score_lists
def generate_final_scores(score_lists: list[list[float]] )-> list[float]:
'''simple docstring'''
final_scores = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(slist ):
final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity(source_data: list[list[float]] , weights: list[int] )-> list[list[float]]:
'''simple docstring'''
data_lists = get_data(source_data )
score_lists = calculate_each_score(data_lists , weights )
final_scores = generate_final_scores(score_lists )
# append scores to source data
for i, ele in enumerate(final_scores ):
source_data[i].append(ele )
return source_data
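# Editor's addition: a worked example. With weights [0, 0, 1] the first two columns
# are inverted (lower is better) and the third counts as-is:
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))  # appends a final score to each row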
| 659 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
'''simple docstring'''
in_colab = False
in_kaggle = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
in_kaggle = True
elif "IPython" in sys.modules:
in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
num_processes = 8
launcher = PrepareForLaunch(function , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=num_processes , master_addr='127.0.01' , master_port=use_port , mixed_precision=mixed_precision ):
launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*args )
def debug_launcher(function , args=() , num_processes=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=num_processes , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
launcher = PrepareForLaunch(function , debug=True )
start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
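# Editor's addition: an illustrative call pattern (comments only, since this module
# uses relative imports). In user code, accelerate exposes these helpers as:
# from accelerate import notebook_launcher
# def training_loop(mixed_precision="fp16"): ...
# notebook_launcher(training_loop, args=("fp16",), num_processes=2)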
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(value: int , step: int , modulus: int ) -> int:
return (pow(value , 2 ) + step) % modulus
for _ in range(attempts ):
# These track the position within the cycle detection logic.
tortoise = seed
hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
tortoise = rand_fn(tortoise , step , num )
hare = rand_fn(hare , step , num )
hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
args = parser.parse_args()
divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name )-> Union[str, Any]:
'''simple docstring'''
backbone_config = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
config = DetaConfig(
backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
# set labels
repo_id = 'huggingface/label-files'
if "o365" in model_name:
num_labels = 366
filename = 'object365-id2label.json'
else:
num_labels = 91
filename = 'coco-detection-id2label.json'
config.num_labels = num_labels
id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def create_rename_keys(config )-> Optional[int]:
'''simple docstring'''
rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new )-> List[str]:
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config )-> List[Any]:
'''simple docstring'''
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
in_proj_bias = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v(state_dict , config )-> int:
'''simple docstring'''
hidden_size = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
in_proj_bias = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
state_dict[F'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
state_dict[F'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
state_dict[F'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img()-> Dict:
'''simple docstring'''
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub )-> Any:
'''simple docstring'''
config = get_deta_config(model_name )
# load original state dict
if model_name == "deta-swin-large":
checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'Model name {model_name} not supported' )
state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(name , param.shape )
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_swin_q_k_v(state_dict , config.backbone_config )
read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
val = state_dict.pop(key )
state_dict[key.replace('transformer.decoder' , 'model.decoder' )] = val
if "input_proj" in key:
val = state_dict.pop(key )
state_dict['model.' + key] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
val = state_dict.pop(key )
state_dict[key.replace('transformer' , 'model' )] = val
# finally, create HuggingFace model and load state dict
model = DetaForObjectDetection(config )
model.load_state_dict(state_dict )
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device )
# load image processor
processor = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
img = prepare_img()
encoding = processor(images=img , return_tensors='pt' )
pixel_values = encoding['pixel_values']
outputs = model(pixel_values.to(device ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
expected_logits = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
expected_logits = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
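# Editor's addition: a typical invocation of this conversion script (the file name is
# assumed; the flags match the argparse definitions above):
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large --push_to_hub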
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput ( BaseOutput ):
images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline ( DiffusionPipeline ):
def __init__( self , prior : PriorTransformer , image_encoder : CLIPVisionModel , image_processor : CLIPImageProcessor , scheduler : HeunDiscreteScheduler , renderer : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
if latents is None:
latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
latents = latents.to(device )
latents = latents * scheduler.init_noise_sigma
return latents
def enable_sequential_cpu_offload( self , gpu_id=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
device = torch.device(F'cuda:{gpu_id}' )
models = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
def _execution_device( self ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
if not isinstance(image , torch.Tensor ):
image = self.image_processor(image , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
image = image.to(dtype=self.image_encoder.dtype , device=device )
image_embeds = self.image_encoder(image )['last_hidden_state']
image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
if do_classifier_free_guidance:
negative_image_embeds = torch.zeros_like(image_embeds )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
image_embeds = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , image: Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
if isinstance(image , PIL.Image.Image ):
batch_size = 1
elif isinstance(image , torch.Tensor ):
batch_size = image.shape[0]
elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
batch_size = len(image )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )
device = self._execution_device
batch_size = batch_size * num_images_per_prompt
do_classifier_free_guidance = guidance_scale > 1.0
image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
# prior
self.scheduler.set_timesteps(num_inference_steps , device=device )
timesteps = self.scheduler.timesteps
num_embeddings = self.prior.config.num_embeddings
embedding_dim = self.prior.config.embedding_dim
latents = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
for i, t in enumerate(self.progress_bar(timesteps ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
noise_pred = self.prior(
scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
# remove the variance
noise_pred , _ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
latents = self.scheduler.step(
noise_pred , timestep=t , sample=latents , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=latents )
images = []
for i, latent in enumerate(latents ):
image_decoded = self.renderer.decode(
latent[None, :] , device , size=frame_size , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(image_decoded )
images = torch.stack(images )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
images = images.cpu().numpy()
if output_type == "pil":
images = [self.numpy_to_pil(image ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=images )
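# Illustration (standalone sketch of the guidance step in the loop above):
# the batch is laid out [unconditional; conditional], matching the torch.cat
# in _encode_image, so the blend is
#
#     uncond, cond = noise_pred.chunk(2)
#     noise_pred = uncond + guidance_scale * (cond - uncond)
#
# and guidance_scale == 1.0 reduces to the purely conditional prediction.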
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid: list[list[int]] , init: list[int] , goal: list[int] , cost: int , heuristic: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''Grid search guided by a heuristic map; returns (path, action).'''
closed = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the reference grid
closed[init[0]][init[1]] = 1
action = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the action grid
x = init[0]
y = init[1]
g = 0
f = g + heuristic[x][y] # estimated total cost through this cell
cell = [[f, g, x, y]]
found = False # flag that is set when search is complete
resign = False # flag set if we cannot expand any further
while not found and not resign:
if len(cell ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # expand the cheapest cell so as to move closer to the goal
cell.sort()
cell.reverse()
next_cell = cell.pop()
x = next_cell[2]
y = next_cell[3]
g = next_cell[1]
if x == goal[0] and y == goal[1]:
found = True
else:
for i in range(len(DIRECTIONS ) ): # to try out different valid actions
xa = x + DIRECTIONS[i][0]
ya = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
ga = g + cost
fa = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
closed[xa][ya] = 1
action[xa][ya] = i
invpath = []
x = goal[0]
y = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
xa = x - DIRECTIONS[action[x][y]][0]
ya = y - DIRECTIONS[action[x][y]][1]
x = xa
y = ya
invpath.append([x, y] )
path = []
for i in range(len(invpath ) ):
path.append(invpath[len(invpath ) - 1 - i] )
return path, action
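# Worked example (sketch): on an obstacle-free 2x2 grid with a zero heuristic,
#
#     path, action = search([[0, 0], [0, 0]], [0, 0], [1, 1], 1, [[0, 0], [0, 0]])
#
# yields path == [[0, 0], [0, 1], [1, 1]] under the sort-based tie-breaking;
# action records, per cell, the index into DIRECTIONS of the move that entered it.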
if __name__ == "__main__":
grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
init = [0, 0]
# all coordinates are given in format [y,x]
goal = [len(grid) - 1, len(grid[0]) - 1]
cost = 1
# the cost map which pushes the path closer to the goal
heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
heuristic[i][j] = 99
path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
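# Note on the pattern: replacing the module in sys.modules with a _LazyModule
# defers the heavy torch/tf/flax imports until an attribute such as
# BlenderbotModel is first accessed; the TYPE_CHECKING branch above exists
# only so static type checkers and IDEs still see the real symbols.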
| 706 |
"""simple docstring"""
def solution(n: int = 4000000 )-> int:
'''Sum the even-valued Fibonacci terms that do not exceed n.'''
fib = [0, 1]
i = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
total = 0
for j in range(len(fib ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
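# Quick check (worked example): for n = 10 the list grows 0, 1, 1, 2, 3, 5, 8, 13,
# the even terms kept are 0, 2 and 8, so solution(10) == 10. The default
# n = 4000000 answers Project Euler problem 2.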
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
from math import factorial
def combinations(n: int , k: int )-> int:
'''Return n choose k, computed from the factorial formula.'''
if n < k or k < 0:
raise ValueError('Please enter positive integers for n and k where n >= k' )
return factorial(n ) // (factorial(k ) * factorial(n - k ))
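# Quick check (worked example): combinations(5, 2) == 120 // (2 * 6) == 10,
# and the symmetry combinations(n, k) == combinations(n, n - k) follows
# directly from the factorial formula above.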
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 707 |
"""simple docstring"""
def bfs(graph , s , t , parent )-> bool:
'''Breadth-first search for an augmenting path from s to t in the residual graph.'''
visited = [False] * len(graph )
queue = []
queue.append(s )
visited[s] = True
while queue:
u = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if not visited[ind] and graph[u][ind] > 0:
queue.append(ind )
visited[ind] = True
parent[ind] = u
return visited[t]
def ford_fulkerson(graph , source , sink )-> int:
'''Edmonds-Karp: repeatedly augment along the shortest path found by bfs.'''
parent = [-1] * len(graph )
max_flow = 0
while bfs(graph , source , sink , parent ):
path_flow = float('Inf' )
s = sink
while s != source:
# Find the minimum residual capacity along the augmenting path
path_flow = min(path_flow , graph[parent[s]][s] )
s = parent[s]
max_flow += path_flow
# update residual capacities of the edges and reverse edges along the path
v = sink
while v != source:
u = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
v = parent[v]
return max_flow
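# Note: because bfs() always finds a shortest augmenting path, this is the
# Edmonds-Karp realisation of Ford-Fulkerson, with O(V * E^2) worst-case time.
# For the capacity matrix below (the classic CLRS example) the maximum flow
# printed is 23.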
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 659 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = CodeGenTokenizer
rust_tokenizer_class = CodeGenTokenizerFast
test_rust_tokenizer = True
from_pretrained_kwargs = {'add_prefix_space': True}
test_seq2seq = False
def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ = {'unk_token': '<unk>'}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def get_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_rust_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
input_text = 'lower newer'
output_text = 'lower newer'
return input_text, output_text
def _UpperCamelCase ( self : Union[str, Any] ):
tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = 'lower newer'
bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
tokens = tokenizer.tokenize(text , add_prefix_space=True )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _UpperCamelCase ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'lower newer'
# Testing tokenization
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids without special tokens
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids with special tokens
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing the unknown token
lowerCamelCase__ = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : str ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# Simple input
lowerCamelCase__ = 'This is a simple input'
lowerCamelCase__ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ = ('This is a simple input', 'This is a pair')
lowerCamelCase__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
def _UpperCamelCase ( self : List[Any] ):
tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
s = 'This is a simple input'
sa = ['This is a simple input looooooooong', 'This is a simple input']
p = ('This is a simple input', 'This is a pair')
pa = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
pad_token_id = tokenizer.pad_token_id
out_s = tokenizer(s , padding='max_length' , max_length=30 , return_tensors='np' )
out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='np' )
out_p = tokenizer(*p , padding='max_length' , max_length=60 , return_tensors='np' )
out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCamelCase ( self : int ):
bos_token = '$$$'
tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
s = 'This is a simple input'
sa = ['This is a simple input 1', 'This is a simple input 2']
bos_token_id = tokenizer.bos_token_id
out_s = tokenizer(s )
out_sa = tokenizer(sa )
self.assertEqual(out_s.input_ids[0] , bos_token_id )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
decode_s = tokenizer.decode(out_s.input_ids )
decode_sa = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , bos_token )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
expected_truncated_text = '\nif len_a > len_b: result = a\nelse: result = b'
input_ids = tokenizer.encode(text )
truncate_before_pattern = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
self.assertEqual(decoded_text , expected_truncated_text )
def _UpperCamelCase ( self : Optional[int] ):
pass
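# Toy-vocab walkthrough (sketch): with the merges defined in setUp(),
# 'lower' with a leading space marker becomes ['\u0120low', 'er']: the merges
# '\u0120 l' -> '\u0120l', '\u0120l o' -> '\u0120lo' and '\u0120lo w' ->
# '\u0120low' apply in rank order, and 'e r' merges into 'er'.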
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset ):
def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
self.tokenizer = tokenizer
self.dataset = dataset
self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
self.n_copies = n_copies
def __iter__( self : Any ):
prompts = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
outputs = self.tokenizer(prompts , padding=True , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria ):
def __init__( self , start_length , eof_strings , tokenizer ):
self.start_length = start_length
self.eof_strings = eof_strings
self.tokenizer = tokenizer
def __call__( self , input_ids , scores , **kwargs ):
decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
done = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(done )
def remove_last_block(string )-> str:
'''Drop the last, usually truncated, block from a generated completion.'''
string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
'''Generate batch_size completions per task and strip trailing blocks.'''
gen_token_dict = defaultdict(list ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(dataloader ) ):
with torch.no_grad():
gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
generated_tokens = accelerator.unwrap_model(model ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
# each task is generated batch_size times
generated_tasks = batch['task_id'].repeat(batch_size )
generated_tokens = accelerator.pad_across_processes(
generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
generated_tokens = generated_tokens.cpu().numpy()
generated_tasks = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(generated_tasks , generated_tokens ):
gen_token_dict[task].append(generated_tokens )
code_gens = [[] for _ in range(n_tasks )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
code_gens[task].append(remove_last_block(gen_code ) )
return code_gens
def main():
'''Run HumanEval generation and evaluation with accelerate.'''
parser = HfArgumentParser(HumanEvalArguments )
args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
os.environ['HF_ALLOW_CODE_EVAL'] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
accelerator = Accelerator()
set_seed(args.seed , device_specific=True )
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
gen_kwargs = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
# Load evaluation dataset and metric
human_eval = load_dataset('openai_humaneval' )
code_eval_metric = load_metric('code_eval' )
n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
n_copies = args.n_samples // args.batch_size
human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['test'] , n_copies=n_copies , n_tasks=n_tasks )
# note: args.batch_size is the number of sequences returned per prompt (num_return_sequences), not the DataLoader batch size
human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
code_gens = complete_code(
accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
if accelerator.is_main_process:
references = []
for task in tqdm(range(n_tasks ) ):
test_func = human_eval['test'][task]['test']
entry_point = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
pass_at_k , _ = code_eval_metric.compute(
references=references , predictions=code_gens , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 2_56 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.1_82_15 , norm_type: str = "group" , ):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
def encode( self , x: torch.FloatTensor , return_dict: bool = True ):
h = self.encoder(x )
h = self.quant_conv(h )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=h )
@apply_forward_hook
def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ):
# also go through quantization layer
if not force_not_quantize:
quant , emb_loss , info = self.quantize(h )
else:
quant = h
quant2 = self.post_quant_conv(quant )
dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
def forward( self , sample: torch.FloatTensor , return_dict: bool = True ):
x = sample
h = self.encode(x ).latents
dec = self.decode(h ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
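# Usage sketch (hedged; shapes assume the default single-block config):
#
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     out = model(x).sample   # encode, quantize (inside decode), decode
#     assert out.shape == x.shape
#
# encode() returns the pre-quantization latents; the codebook lookup happens
# inside decode() unless force_not_quantize=True.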
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
'''Parse raw DPR training data into an evaluation set and a gold data file.'''
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
args = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
dpr_records = json.load(src_file )
for dpr_record in tqdm(dpr_records ):
question = dpr_record['question']
titles = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(titles ) + '\n' )
if __name__ == "__main__":
main()
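# Data sketch: each record in biencoder-nq-dev.json looks roughly like
#   {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}, ...]}
# so the script writes one question per line to --evaluation_set and the
# matching tab-separated gold page titles per line to --gold_data_path.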
| 659 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_snake_case = "base_with_context"
def load_notes_encoder(weights , model ):
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = ly_weight['attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
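# Note: the repeated `.T` above is the Flax -> PyTorch conversion step: T5X
# stores dense kernels as (in_features, out_features), while torch.nn.Linear
# weights are laid out (out_features, in_features); embedding tables are
# copied without transposition.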
def load_continuous_encoder(weights , model ):
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = ly_weight['attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights , model ):
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ = ly_weight['self_attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = ly_weight['MultiHeadDotProductAttention_0']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args ):
'''Convert a T5X music-spectrogram-diffusion checkpoint to a diffusers pipeline.'''
ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
gin_overrides = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
pipe = SpectrogramDiffusionPipeline(
notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_snake_case = parser.parse_args()
main(args)
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig ):
model_type = 'gpt_bigcode'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
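# Usage sketch (hedged; the keyword values here are illustrative only):
#
#     config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2048, multi_query=True)
#     assert config.hidden_size == config.n_embd   # resolved via attribute_map
#
# multi_query=True selects multi-query attention (a single shared key/value
# head), the layout used by the SantaCoder checkpoint this config defaults to.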
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig ):
model_type = 'swinv2'
attribute_map = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
self.pretrained_window_sizes = (0, 0, 0, 0)
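# Worked example: hidden_size is derived rather than passed in. With the
# defaults embed_dim=96 and depths=[2, 2, 6, 2] (four stages),
# hidden_size = 96 * 2 ** (4 - 1) = 768, the channel count after the last stage.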
| 659 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = MobileBertTokenizer
rust_tokenizer_class = MobileBertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
pre_trained_model_path = 'google/mobilebert-uncased'
def setUp( self ):
super().setUp()
lowerCamelCase__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase__ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = 'unwanted, running'
return input_text, output_text
def _UpperCamelCase ( self : Union[str, Any] ):
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
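# (The expected ids follow the toy vocab order from setUp():
#  un=9, ##want=6, ##ed=7, ','=12, runn=10, ##ing=11.)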
def _UpperCamelCase ( self : Dict ):
if not self.test_rust_tokenizer:
return
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# With lower casing
lowerCamelCase__ = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase__ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 712 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    '''Replace the last `occurrence` occurrences of `old` in `s` with `new`.'''
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')

        if 'res_path' in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    '''Copy/paste/tweak the DALL-E encoder weights into the transformers FlavaImageCodebook design.'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()

    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''Compute the MinHash of a code snippet.'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    '''Tokenize a code snippet.'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    '''Find duplicate clusters in the dataset via MinHash LSH.'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    '''Compute the exact Jaccard similarity of two code snippets.'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
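# Aside: the LSH index above only approximates the exact Jaccard similarity computed by
# `jaccard_similarity`. A quick way to compare the two on toy data (using the datasketch
# MinHash imported above):
#
#     h1, h2 = MinHash(num_perm=NUM_PERM), MinHash(num_perm=NUM_PERM)
#     for t in get_tokens("def add(a, b): return a + b"):
#         h1.update(t.encode())
#     for t in get_tokens("def add(x, y): return x + y"):
#         h2.update(t.encode())
#     estimate = h1.jaccard(h2)  # MinHash estimate of the Jaccard similarity
#     exact = jaccard_similarity("def add(a, b): return a + b", "def add(x, y): return x + y")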
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''Reduce a cluster so every file is similar to at least one kept "extreme".'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''Run _find_cluster_extremes_shared in a process pool, sharing the dataset globally.'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']

    print(F'Original dataset size: {len(dataset)}')
    print(F'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(F'Files in duplicate cluster: {len(duplicate_indices)}')
    print(F'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(F'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters
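# Hypothetical usage sketch (the toy dataset below is an assumption, not part of this
# module): `deduplicate_dataset` expects a `datasets.Dataset` with "content",
# "repo_name" and "path" columns.
#
#     from datasets import Dataset
#
#     toy = Dataset.from_dict({
#         "content": ["def f():\n    pass", "def f():\n    pass", "print('hello')"],
#         "repo_name": ["repo1", "repo2", "repo3"],
#         "path": ["a.py", "b.py", "c.py"],
#     })
#     ds_dedup, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)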
| 659 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    language: str = field(
        default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'}
    )
    train_language: Optional[str] = field(
        default=None, metadata={'help': 'Train language if it is different from the evaluation language.'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features['label'].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features['label'].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features['label'].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task='xnli',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'],
            examples['hypothesis'],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on train dataset',
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.')
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on validation dataset',
            )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on prediction dataset',
            )
# Get the metric function
    metric = evaluate.load('xnli')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n')
if __name__ == "__main__":
main()
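    # Example invocation (model name and paths below are placeholders, not requirements):
    #   python run_xnli.py \
    #       --model_name_or_path bert-base-multilingual-cased \
    #       --language de --train_language en \
    #       --do_train --do_eval \
    #       --per_device_train_batch_size 32 \
    #       --output_dir /tmp/debug_xnli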
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
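# For reference, the input is a pytest stats line. Walking through an example:
#
#     handle_test_results("=== 4 failed, 120 passed in 42.12s ===")
#     # -> (4, 120, "42.12s"): each count token precedes its "failed"/"passed" keyword,
#     #    and the trailing "===" means the time token is second from the end.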
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(hours)}h{int(minutes)}m{int(seconds)}s'
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ''
for category, failures in category_failures.items():
            if len(failures) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
            report += "`\n`".join(failures)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload,
        )
    def post(self):
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))

        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'

        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += F'*{key}*\n_{value}_\n\n'

        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}

        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')

        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))

                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'],
                    text=F'Results for {job}',
                    blocks=blocks,
                    thread_ts=self.thread_ts['ts'],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'&page={i + 2}').json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})

        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name, file )}.') from e

    return _artifact
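# For reference (hypothetical directory, illustrating the `file.split('.')[0]` keying
# above): an artifact folder containing "stats.txt" and "failures_short.txt" yields
# {"stats": "<file contents>", "failures_short": "<file contents>"}.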
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({'name': self.name, 'path': path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def time_input_dim(self):
        return 32
@property
    def block_out_channels_0(self):
        return self.time_input_dim
@property
    def time_embed_dim(self):
        return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )

        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = 'a hat'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()

        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type='np', )

        image = output.images[0]
        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(F'cuda:{gpu_id}')

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
            hasattr(module, '_hf_hook')
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}')

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
print()
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
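    # Note on the guidance step in the denoising loop above: the batch is ordered
    # [unconditional, conditional], so after `chunk(2)` the update
    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    # reduces to the conditional prediction at guidance_scale = 1.0 and extrapolates away
    # from the unconditional prediction for larger values.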
| 659 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    '''Checkpoint path should end in model.pt'''
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def snake_case ( _a: Union[str, Any] , _a: str , _a: Tuple=None )-> Dict:
'''simple docstring'''
lowerCamelCase__ = load_checkpoint(_a )
if config is not None:
lowerCamelCase__ = OPTConfig.from_pretrained(_a )
else:
lowerCamelCase__ = OPTConfig()
lowerCamelCase__ = OPTModel(_a ).half().eval()
model.load_state_dict(_a )
# Check results
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_snake_case = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
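# Standalone sketch of the fused-QKV split performed in `load_checkpoint` above:
# a (3 * d, in_features) weight is cut into three equal parts along dim 0.
# The tensor shapes here are illustrative.
import torch
fused = torch.arange(18.0).reshape(6, 3)  # pretend depth = 6, so d = 2
q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (2, 3)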
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
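# Illustration of the special-token layout configured above, using token strings
# in place of ids; the helper name and example tokens are illustrative. In legacy
# mode the language code follows EOS at the end, otherwise it is prepended.
def nllb_single_sequence(tokens: list, lang_code: str = "eng_Latn", legacy: bool = False) -> list:
    if legacy:
        return tokens + ["</s>", lang_code]
    return [lang_code] + tokens + ["</s>"]
assert nllb_single_sequence(["Hello", "world"]) == ["eng_Latn", "Hello", "world", "</s>"]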
| 659 | 0 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
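# Standalone sketch of the open-addressing probe used by the map above: start at
# hash(key) modulo the table size and step linearly, wrapping around. The
# function name is illustrative.
def probe_sequence(key, n_buckets: int):
    ind = hash(key) % n_buckets
    for _ in range(n_buckets):
        yield ind
        ind = (ind + 1) % n_buckets
assert len(set(probe_sequence("k", 8))) == 8  # visits every bucket exactly once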
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
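# Sketch of the attention-mask fix-up in prepare_config_and_inputs above: each
# row keeps a random-length prefix of 1s so at least one token is attended to.
# Shapes and start indices here are illustrative.
import numpy as np
mask = np.zeros((2, 7), dtype=np.int64)
for row, start in enumerate([3, 5]):
    mask[row, :start] = 1
assert mask.sum(axis=1).min() >= 1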
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
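# Simplified, standalone sketch of the lazy-import pattern used above (the real
# transformers `_LazyModule` is more involved): attribute lookups trigger the
# submodule import only on first access.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")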
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = StableDiffusionXLImgaImgPipeline
a_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a_ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
a_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase__ = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=32 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=0 ):
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE__ ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCamelCase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCamelCase ( self : Any ):
pass
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# forward without prompt embeds
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * ['this is a negative prompt']
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = 3 * [inputs['prompt']]
lowerCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * ['this is a negative prompt']
lowerCamelCase__ = 3 * [inputs.pop('prompt' )]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE__ , negative_prompt=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe(
**SCREAMING_SNAKE_CASE__ , prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]="cpu" , SCREAMING_SNAKE_CASE__ : List[str]=torch.floataa , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowerCamelCase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
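# Sketch of the device-aware seeding used by get_inputs above: MPS needs the
# default CPU generator, other devices can seed a device-local one. The helper
# name is illustrative.
import torch
def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)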
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
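# Sketch of the token-type layout produced above: Funnel gives the [CLS] token
# its own type id (`cls_token_type_id = 2`) instead of folding it into sentence
# A. The helper name is illustrative.
def funnel_token_types(len_a: int, len_b=None, cls_type: int = 2) -> list:
    types = [cls_type] + [0] * (len_a + 1)  # sentence A tokens + trailing [SEP]
    if len_b is not None:
        types += [1] * (len_b + 1)  # sentence B tokens + trailing [SEP]
    return types
assert funnel_token_types(3) == [2, 0, 0, 0, 0]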
| 659 | 0 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
if len(_a ) != 32:
raise ValueError('Input must be of length 32' )
lowerCamelCase__ = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case ( _a: int )-> bytes:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
lowerCamelCase__ = format(_a , '08x' )[-8:]
lowerCamelCase__ = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
lowerCamelCase__ = b''
for char in message:
bit_string += format(_a , '08b' ).encode('utf-8' )
lowerCamelCase__ = format(len(_a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_a ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case ( _a: bytes )-> Generator[list[int], None, None]:
'''simple docstring'''
if len(_a ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_a ) , 512 ):
lowerCamelCase__ = bit_string[pos : pos + 512]
lowerCamelCase__ = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case ( _a: int )-> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
lowerCamelCase__ = format(_a , '032b' )
lowerCamelCase__ = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_a , 2 )
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
return (a + b) % 2**32
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
lowerCamelCase__ = preprocess(_a )
lowerCamelCase__ = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCamelCase__ = 0x6745_2301
lowerCamelCase__ = 0xEFCD_AB89
lowerCamelCase__ = 0x98BA_DCFE
lowerCamelCase__ = 0x1032_5476
lowerCamelCase__ = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_a ):
lowerCamelCase__ = aa
lowerCamelCase__ = ba
lowerCamelCase__ = ca
lowerCamelCase__ = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCamelCase__ = d ^ (b & (c ^ d))
lowerCamelCase__ = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCamelCase__ = c ^ (d & (b ^ c))
lowerCamelCase__ = (5 * i + 1) % 16
elif i <= 47:
lowerCamelCase__ = b ^ c ^ d
lowerCamelCase__ = (3 * i + 5) % 16
else:
lowerCamelCase__ = c ^ (b | not_aa(_a ))
lowerCamelCase__ = (7 * i) % 16
lowerCamelCase__ = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCamelCase__ = d
lowerCamelCase__ = c
lowerCamelCase__ = b
lowerCamelCase__ = sum_aa(_a , left_rotate_aa(_a , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
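# Standalone check of the 32-bit left-rotate used in the compression loop above,
# reimplemented here under an illustrative name:
def left_rotate_32(i: int, shift: int) -> int:
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
assert left_rotate_32(0b1, 31) == 0x8000_0000
assert left_rotate_32(0x8000_0000, 1) == 0b1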
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: int )-> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCamelCase__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCamelCase__ = 1
if upper_limit > 0:
lowerCamelCase__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
_snake_case = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
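# Hand-check of the recurrence used above: C(3) = C0*C2 + C1*C1 + C2*C0
# = 1*2 + 1*1 + 2*1 = 5, matching the known sequence 1, 1, 2, 5, 14, 42.
assert 1 * 2 + 1 * 1 + 2 * 1 == 5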
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 659 | 0 |
"""simple docstring"""
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = value
lowerCamelCase__ = weight
def __repr__( self : List[Any] ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _UpperCamelCase ( self : Union[str, Any] ):
return self.value
def _UpperCamelCase ( self : Dict ):
return self.name
def _UpperCamelCase ( self : Tuple ):
return self.weight
def _UpperCamelCase ( self : Tuple ):
return self.value / self.weight
def snake_case ( _a: str , _a: Optional[int] , _a: Tuple )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = []
for i in range(len(_a ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def snake_case ( _a: str , _a: Dict , _a: List[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = sorted(_a , key=_a , reverse=_a )
lowerCamelCase__ = []
lowerCamelCase__ , lowerCamelCase__ = 0.0, 0.0
for i in range(len(_a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def snake_case ( )-> Any:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
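# Standalone sketch of the greedy strategy above: sort by value-per-weight ratio
# and take items while they still fit. The helper name and example items are
# illustrative.
def greedy_fill(items: list, max_weight: float):
    # items: list of (value, weight) pairs
    chosen, total_value, total_weight = [], 0.0, 0.0
    for value, weight in sorted(items, key=lambda vw: vw[0] / vw[1], reverse=True):
        if total_weight + weight <= max_weight:
            chosen.append((value, weight))
            total_weight += weight
            total_value += value
    return chosen, total_value
assert greedy_fill([(60, 10), (100, 20), (120, 30)], 50)[1] == 160.0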
| 700 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
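# Worked check of the bottom-up DP above: with array [1, 2, 5] and target 5,
# the ordered compositions are 1+1+1+1+1, 1+1+1+2 (4 orders), 1+2+2 (3 orders)
# and 5 itself, i.e. 1 + 4 + 3 + 1 = 9 ways, matching dp_array[5].
assert 1 + 4 + 3 + 1 == 9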
| 659 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _a ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
a_ : List[Any] = 'sew-d'
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Tuple=7_68 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : List[str]=2_56 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : Optional[int]="layer_norm" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-7 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : str="group" , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE__ : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_28 , SCREAMING_SNAKE_CASE__ : Dict=16 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Tuple=0.05 , SCREAMING_SNAKE_CASE__ : Dict=10 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[int]=2_56 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : Dict=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = feat_extract_norm
lowerCamelCase__ = feat_extract_activation
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = conv_bias
lowerCamelCase__ = num_conv_pos_embeddings
lowerCamelCase__ = num_conv_pos_embedding_groups
lowerCamelCase__ = len(self.conv_dim )
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = squeeze_factor
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = position_buckets
lowerCamelCase__ = share_att_key
lowerCamelCase__ = relative_attention
lowerCamelCase__ = norm_rel_ebd
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = feat_proj_dropout
lowerCamelCase__ = final_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = feature_layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ = apply_spec_augment
lowerCamelCase__ = mask_time_prob
lowerCamelCase__ = mask_time_length
lowerCamelCase__ = mask_time_min_masks
lowerCamelCase__ = mask_feature_prob
lowerCamelCase__ = mask_feature_length
lowerCamelCase__ = mask_feature_min_masks
# ctc loss
lowerCamelCase__ = ctc_loss_reduction
lowerCamelCase__ = ctc_zero_infinity
# sequence classification
lowerCamelCase__ = use_weighted_layer_sum
lowerCamelCase__ = classifier_proj_size
@property
def _UpperCamelCase ( self : Optional[int] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
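# Sanity sketch of the property defined above: the feature extractor's overall
# downsampling factor is the product of the conv strides (the default strides
# listed in this config multiply out to 320).
import functools
import operator
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, conv_stride, 1) == 320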
| 702 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
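# Illustrative walk-through (values are assumptions; the helpers above all share
# one obfuscated name): for source_data = [[20, 60], [10, 90]] and weights = [0, 1],
# the first helper transposes the rows to [[20, 10], [60, 90]]; column 0
# (weight 0) is min-max scored and inverted to [0, 1], column 1 (weight 1) is
# scored directly to [0, 1]; the summed final scores [0, 2] are then appended
# per row, yielding [[20, 60, 0], [10, 90, 2]].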
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: int )-> str:
'''simple docstring'''
lowerCamelCase__ = int(_a )
if decimal in (0, 1): # Exit cases for the recursion
return str(_a )
lowerCamelCase__ , lowerCamelCase__ = divmod(_a , 2 )
return binary_recursive(_a ) + str(_a )
def snake_case ( _a: str )-> str:
'''simple docstring'''
lowerCamelCase__ = str(_a ).strip()
if not number:
raise ValueError('No input value was provided' )
lowerCamelCase__ = '-' if number.startswith('-' ) else ''
lowerCamelCase__ = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F'{negative}0b{binary_recursive(int(_a ) )}'
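# Illustrative examples (a sketch, assuming the wrapper above keeps this contract):
# '17' -> '0b10001', '-17' -> '-0b10001', '0' -> '0b0'; a non-integer string such
# as '1.5' raises ValueError('Input value is not an integer').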
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
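# Illustrative usage (a sketch; the factor found depends on the seed and step):
# pollard_rho(10403) typically returns 101 or 103 within the default three
# attempts, since 10403 == 101 * 103, while pollard_rho(13) returns None
# because 13 is prime and only the trivial divisor can ever appear.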
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_snake_case = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_snake_case = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def snake_case ( _a: str )-> dict[str, int]:
'''simple docstring'''
lowerCamelCase__ = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def snake_case ( _a: tuple )-> str:
'''simple docstring'''
return x[0]
def snake_case ( _a: str )-> str:
'''simple docstring'''
lowerCamelCase__ = get_letter_count(_a )
lowerCamelCase__ = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_a )
lowerCamelCase__ = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_a )
lowerCamelCase__ = ''.join(freq_to_letter[freq] )
lowerCamelCase__ = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_a , reverse=_a )
lowerCamelCase__ = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_a )
def snake_case ( _a: str )-> int:
'''simple docstring'''
lowerCamelCase__ = get_frequency_order(_a )
lowerCamelCase__ = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
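# Illustrative note: the score ranges from 0 to 12 (6 points for the most common
# letters, 6 for the least common). Plain English text usually scores near the
# top while random or enciphered text scores lower, which is what makes this a
# usable fitness function when ranking candidate decryptions.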
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
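# Illustrative note: in the un-obfuscated original this assignment replaces the
# entry in sys.modules with a _LazyModule proxy, so the heavy torch-backed
# symbols are only imported on first attribute access, while the TYPE_CHECKING
# branch keeps static type checkers and IDEs aware of the real names.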
| 705 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
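# Illustrative note: action[x][y] stores the index into DIRECTIONS of the move
# used to *enter* cell (x, y), which is why the path is reconstructed backwards
# from the goal by repeatedly subtracting that direction until the start cell
# is reached.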
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def snake_case ( )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ = g.get_repo('huggingface/diffusers' )
lowerCamelCase__ = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCamelCase__ = sorted(issue.get_comments() , key=lambda _a : _a.created_at , reverse=_a )
lowerCamelCase__ = comments[0] if len(_a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 706 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
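# Illustrative checks: solution(10) == 10 (2 + 8) and solution(34) == 44
# (2 + 8 + 34); the default limit of four million reproduces Project Euler
# problem 2, whose answer is 4613732.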
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
from collections import deque
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = deque()
lowerCamelCase__ = [False for _ in range(_a )]
lowerCamelCase__ = [-1 for _ in range(_a )]
lowerCamelCase__ = index_of[:]
def strong_connect(_a: Optional[Any] , _a: str , _a: Optional[int] ):
lowerCamelCase__ = index # the number when this node is seen
lowerCamelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(_a )
lowerCamelCase__ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase__ = strong_connect(_a , _a , _a )
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase__ = []
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(_a )
while w != v:
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(_a )
components.append(_a )
return index
lowerCamelCase__ = []
for v in range(_a ):
if index_of[v] == -1:
strong_connect(_a , 0 , _a )
return components
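# Illustrative note: Tarjan's algorithm emits each strongly connected component
# when its root node is popped, so the returned list holds the SCCs in reverse
# topological order of the condensation graph (sink components first), as the
# assertion at the bottom of this file shows.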
def snake_case ( _a: Optional[Any] , _a: List[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [[] for _ in range(_a )]
for u, v in edges:
g[u].append(_a )
return g
if __name__ == "__main__":
# Test
_snake_case = 7
_snake_case = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_snake_case = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_snake_case = [(u, v) for u, v in zip(source, target)]
_snake_case = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 707 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
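# For the classic CLRS flow network above, the printed maximum flow should be 23.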
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _a :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str=13 , SCREAMING_SNAKE_CASE__ : List[str]=7 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=37 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=10_00 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = range_bbox
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ = bbox[i, j, 3]
lowerCamelCase__ = bbox[i, j, 1]
lowerCamelCase__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ = bbox[i, j, 2]
lowerCamelCase__ = bbox[i, j, 0]
lowerCamelCase__ = t
lowerCamelCase__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = TFLayoutLMModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFLayoutLMForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFLayoutLMForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
a_ : Tuple = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Optional[Any] = False
a_ : int = True
a_ : Optional[Any] = 10
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = TFLayoutLMModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : List[Any] ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFLayoutLMModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def _UpperCamelCase ( self : str ):
pass
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCamelCase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
# test the sequence output on [0, :3, :3]
lowerCamelCase__ = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# test the pooled output on [1, :3]
lowerCamelCase__ = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
@slow
def _UpperCamelCase ( self : str ):
# initialize model with randomly initialized sequence classification head
lowerCamelCase__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(
input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCamelCase__ = outputs.loss
lowerCamelCase__ = (2,)
self.assertEqual(loss.shape , SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = (2, 2)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
# initialize model with randomly initialized token classification head
lowerCamelCase__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(
input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
        # initialize model with randomly initialized question answering head
lowerCamelCase__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , SCREAMING_SNAKE_CASE__ )
self.assertEqual(outputs.end_logits.shape , SCREAMING_SNAKE_CASE__ )
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
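# Illustrative note (inferred from the obfuscated call sites): batch_size acts
# as num_return_sequences and each task appears n_copies = n_samples // batch_size
# times in the dataloader, so every HumanEval task ends up with n_samples
# candidate completions, which is what pass@k estimation in code_eval expects.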
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import argparse
import os
import re
_snake_case = "src/diffusers"
# Pattern that looks at the indentation in a line.
_snake_case = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_snake_case = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_snake_case = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_snake_case = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_snake_case = re.compile(R"\[([^\]]+)\]")
def snake_case ( _a: str )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = _re_indent.search(_a )
return "" if search is None else search.groups()[0]
def snake_case ( _a: Tuple , _a: List[str]="" , _a: str=None , _a: Optional[Any]=None )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
lowerCamelCase__ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_a ):
index += 1
lowerCamelCase__ = ['\n'.join(lines[:index] )]
else:
lowerCamelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase__ = [lines[index]]
index += 1
while index < len(_a ) and (end_prompt is None or not lines[index].startswith(_a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_a ) )
if index < len(_a ) - 1:
lowerCamelCase__ = [lines[index + 1]]
index += 1
else:
lowerCamelCase__ = []
else:
blocks.append('\n'.join(_a ) )
lowerCamelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_a ) > 0:
blocks.append('\n'.join(_a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_a ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def snake_case ( _a: Union[str, Any] )-> int:
'''simple docstring'''
def _inner(_a: Union[str, Any] ):
return key(_a ).lower().replace('_' , '' )
return _inner
def snake_case ( _a: Dict , _a: int=None )-> Dict:
'''simple docstring'''
def noop(_a: List[Any] ):
return x
if key is None:
lowerCamelCase__ = noop
# Constants are all uppercase, they go first.
lowerCamelCase__ = [obj for obj in objects if key(_a ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase__ = [obj for obj in objects if key(_a )[0].isupper() and not key(_a ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase__ = [obj for obj in objects if not key(_a )[0].isupper()]
lowerCamelCase__ = ignore_underscore(_a )
return sorted(_a , key=_a ) + sorted(_a , key=_a ) + sorted(_a , key=_a )
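# Illustrative example (a sketch; the sorter above is sort_objects in the
# un-obfuscated source): for ['zeta', 'Alpha', 'BETA', '_CONST'] the result is
# ['BETA', '_CONST', 'Alpha', 'zeta']: constants first, then classes, then
# functions, each group sorted case-insensitively with underscores ignored.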
def snake_case ( _a: str )-> List[str]:
'''simple docstring'''
def _replace(_a: List[Any] ):
lowerCamelCase__ = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
lowerCamelCase__ = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase__ = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(_a )] ) + "]"
lowerCamelCase__ = import_statement.split('\n' )
if len(_a ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase__ = 2 if lines[1].strip() == '[' else 1
lowerCamelCase__ = [(i, _re_strip_line.search(_a ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCamelCase__ = sort_objects(_a , key=lambda _a : _a[1] )
lowerCamelCase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_a ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCamelCase__ = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase__ = keys[:-1]
lowerCamelCase__ = get_indent(lines[1] ) + ', '.join([F'"{k}"' for k in sort_objects(_a )] )
return "\n".join(_a )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase__ = _re_bracket_content.sub(_replace , _a )
return import_statement
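# Illustrative example (a sketch): a one-line statement such as
#   _import_structure["models"] = ["zeta", "Alpha", "BETA"]
# is rewritten in place to
#   _import_structure["models"] = ["BETA", "Alpha", "zeta"]
# via the bracket-content substitution; statements spread over three or more
# lines are handled by the two branches above instead.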
def snake_case ( _a: List[Any] , _a: Any=True )-> List[str]:
'''simple docstring'''
with open(_a , 'r' ) as f:
lowerCamelCase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase__ = split_code_in_indented_blocks(
_a , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_a ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase__ = main_blocks[block_idx]
lowerCamelCase__ = block.split('\n' )
# Get to the start of the imports.
lowerCamelCase__ = 0
while line_idx < len(_a ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase__ = len(_a )
else:
line_idx += 1
if line_idx >= len(_a ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase__ = '\n'.join(block_lines[line_idx:-1] )
lowerCamelCase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCamelCase__ = split_code_in_indented_blocks(_a , indent_level=_a )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase__ = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase__ = [(pattern.search(_a ).groups()[0] if pattern.search(_a ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase__ = [(i, key) for i, key in enumerate(_a ) if key is not None]
        lowerCamelCase__ = [x[0] for x in sorted(_a , key=lambda _a : _a[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase__ = 0
lowerCamelCase__ = []
for i in range(len(_a ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCamelCase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_a )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase__ = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_a ):
if check_only:
return True
else:
print(F'Overwriting {file}.' )
with open(_a , 'w' ) as f:
f.write('\n'.join(_a ) )
def snake_case ( _a: List[str]=True )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = []
for root, _, files in os.walk(_a ):
if "__init__.py" in files:
lowerCamelCase__ = sort_imports(os.path.join(_a , '__init__.py' ) , check_only=_a )
if result:
lowerCamelCase__ = [os.path.join(_a , '__init__.py' )]
if len(_a ) > 0:
raise ValueError(F'Would overwrite {len(_a )} files, run `make style`.' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import math
def snake_case ( _a: list , _a: int = 0 , _a: int = 0 )-> list:
'''simple docstring'''
lowerCamelCase__ = end or len(_a )
for i in range(_a , _a ):
lowerCamelCase__ = i
lowerCamelCase__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase__ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase__ = temp_index_value
return array
def snake_case ( _a: list , _a: int , _a: int )-> None: # Max Heap
'''simple docstring'''
lowerCamelCase__ = index
lowerCamelCase__ = 2 * index + 1 # Left Node
lowerCamelCase__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase__ = right_index
if largest != index:
lowerCamelCase__ , lowerCamelCase__ = array[largest], array[index]
heapify(_a , _a , _a )
def snake_case ( _a: list )-> list:
'''simple docstring'''
lowerCamelCase__ = len(_a )
for i in range(n // 2 , -1 , -1 ):
heapify(_a , _a , _a )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase__ , lowerCamelCase__ = array[0], array[i]
heapify(_a , 0 , _a )
return array
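# Illustrative check: heap_sort([4, 1, 3, 9, 7]) returns [1, 3, 4, 7, 9]: the
# array is first built into a max-heap, then the maximum is repeatedly swapped
# to the end and the shrunken prefix re-heapified.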
def snake_case ( _a: list , _a: int , _a: int , _a: int )-> int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def snake_case ( _a: list , _a: int , _a: int , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = low
lowerCamelCase__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase__ , lowerCamelCase__ = array[j], array[i]
i += 1
def snake_case ( _a: list )-> list:
'''simple docstring'''
if len(_a ) == 0:
return array
lowerCamelCase__ = 2 * math.ceil(math.loga(len(_a ) ) )
lowerCamelCase__ = 16
return intro_sort(_a , 0 , len(_a ) , _a , _a )
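# Illustrative note: this is the standard introsort recipe, i.e. quicksort with
# a median-of-three pivot until the depth budget 2 * ceil(log2(n)) is exhausted,
# a heap sort fallback beyond that depth, and insertion sort for slices of at
# most 16 elements.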
def snake_case ( _a: list , _a: int , _a: int , _a: int , _a: int )-> list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_a )
max_depth -= 1
lowerCamelCase__ = median_of_a(_a , _a , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase__ = partition(_a , _a , _a , _a )
intro_sort(_a , _a , _a , _a , _a )
lowerCamelCase__ = p
return insertion_sort(_a , _a , _a )
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input("Enter numbers separated by a comma : ").strip()
_snake_case = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _a ( SCREAMING_SNAKE_CASE_ ):
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = 8
# DPR tok
lowerCamelCase__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase__ = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ = {'unk_token': '<unk>'}
lowerCamelCase__ = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : Dict ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _UpperCamelCase ( self : str ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _UpperCamelCase ( self : Any ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _UpperCamelCase ( self : int ):
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.get_dummy_dataset()
lowerCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCamelCase__ = dataset
lowerCamelCase__ = RagRetriever(
SCREAMING_SNAKE_CASE__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : bool ):
lowerCamelCase__ = self.get_dummy_dataset()
lowerCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
lowerCamelCase__ = os.path.join(self.tmpdirname , 'dataset' )
lowerCamelCase__ = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
lowerCamelCase__ = RagRetriever(
SCREAMING_SNAKE_CASE__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCamelCase__ = RagRetriever(
SCREAMING_SNAKE_CASE__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE__ ) , )
return retriever
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
lowerCamelCase__ = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
lowerCamelCase__ = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE__ , open(SCREAMING_SNAKE_CASE__ , 'wb' ) )
lowerCamelCase__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
lowerCamelCase__ = RagRetriever(
SCREAMING_SNAKE_CASE__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCamelCase__ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_legacy_index_retriever()
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever.retrieve(SCREAMING_SNAKE_CASE__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _UpperCamelCase ( self : Dict ):
import torch
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase__ = [[5, 7], [10, 11]]
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
lowerCamelCase__ = retriever(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase__ = 1
lowerCamelCase__ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE__ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [[5, 7], [10, 11]]
lowerCamelCase__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase__ = retriever(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , 6 ) # check that the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , SCREAMING_SNAKE_CASE__ ) # check for the doc-token-related keys in the dictionary
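# Hedged usage sketch (not part of the test suite above): retrieving documents with
# a real RagRetriever. The checkpoint name and the dummy-index flag below are
# assumptions for illustration only.
#
#   from transformers import RagRetriever
#   import numpy as np
#
#   retriever = RagRetriever.from_pretrained(
#       "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#   )
#   question_hidden_states = np.ones((1, 768), dtype=np.float32)
#   doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)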
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
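# Hedged usage sketch: the class above mirrors `transformers.Swinv2Config` (the
# obfuscated name `_a` stands in for it). With the defaults, hidden_size is derived
# as embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768:
#
#   config = Swinv2Config()          # assumed un-obfuscated name
#   assert config.hidden_size == 768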
| 659 | 0 |
"""simple docstring"""
_snake_case = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
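# Hedged usage sketch of the API re-exported above (a minimal training loop;
# `model`, `optimizer` and `dataloader` are placeholders, not defined here):
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()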
| 712 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
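# Worked example (names as used at the call sites above): factorial(10) == 3628800,
# and split_and_add(3628800) sums the digits 3+6+2+8+8+0+0 == 27, so solution(10)
# returns 27. The default solution(100) returns 648 (Project Euler problem 20).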
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class _a :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : bytes ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
lowerCamelCase__ = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : bytes ):
lowerCamelCase__ = B'\x80' + (B'\x00' * (63 - (len(SCREAMING_SNAKE_CASE__ ) + 8) % 64))
lowerCamelCase__ = struct.pack('>Q' , (len(SCREAMING_SNAKE_CASE__ ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self : Dict ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack('>16L' , SCREAMING_SNAKE_CASE__ ) )
# extend with 48 zeroed integers to complete the 64-word message schedule
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# fill in the zeroed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
lowerCamelCase__ = self.ror(SCREAMING_SNAKE_CASE__ , 6 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 11 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0Xf_f_f_f_f_f_f_f) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
lowerCamelCase__ = self.ror(SCREAMING_SNAKE_CASE__ , 2 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 13 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = ''.join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 0Xf_f_f_f_f_f_f_f & (value << (32 - rotations)) | (value >> rotations)
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : List[Any] ):
import hashlib
lowerCamelCase__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(SCREAMING_SNAKE_CASE__ ).hash , hashlib.shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() )
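# Worked example (hedged): with distinct names for the working variables (a..h,
# s0/s1, temp1/temp2 in the un-obfuscated source), the class above reproduces
# hashlib's output, e.g.
#
#   import hashlib
#   assert SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#   # "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"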
def snake_case ( )-> None:
'''simple docstring'''
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = bytes(_a , 'utf-8' )
print(SHAaaa(_a ).hash )
if __name__ == "__main__":
main()
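# Hedged CLI sketch (assuming this module is saved as sha256.py; the arguments
# match the -s/--string and -f/--file options defined above):
#   python sha256.py -s "Hello World!! Welcome to Cryptography"
#   python sha256.py -f ./some_file.bin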
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
    def __init__( self : List[Any] , *, SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
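# Hedged usage sketch: in the un-obfuscated source the entry point above is
# deduplicate_dataset(dataset, jaccard_threshold); the dataset name below is a
# placeholder. The input needs "content", "repo_name" and "path" columns.
#
#   from datasets import load_dataset
#   ds = load_dataset("some_org/some_code_dataset", split="train")
#   ds_filtered, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)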
| 659 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = 'vit'
def __init__( self : int , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : str=30_72 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_24 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , **SCREAMING_SNAKE_CASE__ : str , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = encoder_stride
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = version.parse('1.11' )
@property
def _UpperCamelCase ( self : Optional[Any] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCamelCase ( self : List[Any] ):
return 1e-4
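# Hedged usage sketch: the classes above mirror transformers' ViTConfig and its
# ONNX config. Assuming the un-obfuscated name:
#
#   config = ViTConfig(image_size=224, patch_size=16)
#   # 224 / 16 = 14 patches per side, so the encoder sees 14 * 14 = 196
#   # patch tokens plus one [CLS] token.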
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
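# Worked example: for test_results == "1 failed, 2 passed in 30.5s" the parser
# above returns failed=1, success=2 and time_spent="30.5s" (when the last token
# contains "=", the second-to-last token is taken instead).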
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case ( _a: Optional[int] , _a: Optional[Any] , _a: int )-> Tuple:
'''simple docstring'''
if gpta_config_file == "":
lowerCamelCase__ = GPTaConfig()
else:
lowerCamelCase__ = GPTaConfig.from_json_file(_a )
lowerCamelCase__ = GPTaModel(_a )
# Load weights from numpy
load_tf_weights_in_gpta(_a , _a , _a )
# Save pytorch-model
lowerCamelCase__ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , _a )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(_a , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_snake_case = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
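# Hedged CLI sketch (the script filename and all paths are placeholders):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json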
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
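# Classifier-free guidance combines the two passes as
# uncond + guidance_scale * (cond - uncond); with guidance_scale == 1.0 this
# reduces to the conditional prediction alone.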
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = ['image_processor', 'tokenizer']
a_ : List[str] = 'BlipImageProcessor'
a_ : int = 'AutoTokenizer'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = False
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_processor
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : ImageInput = None , SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
lowerCamelCase__ = self.tokenizer
lowerCamelCase__ = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
return text_encoding
# add pixel_values
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
if text is not None:
lowerCamelCase__ = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
else:
lowerCamelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__ )
return encoding_image_processor
def _UpperCamelCase ( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
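# Hedged usage sketch: this processor matches the BLIP-2 pattern
# (BlipImageProcessor + AutoTokenizer). The checkpoint name and the
# un-obfuscated class name are assumptions:
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")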
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
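        # Rebuild the fast tokenizer's post-processor so every encoded sequence is
        # wrapped with the current prefix/suffix tokens (language code and EOS).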
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
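# Illustrative usage sketch (not part of the class above, which is the fast NLLB
# tokenizer, named `_a` here; the checkpoint name is the documented NLLB default):
# tokenizer = _a.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
# inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Each encoded sequence is wrapped as [src_lang_code] ... [eos]; with
# `legacy_behaviour` enabled the order becomes ... [eos] [src_lang_code],
# matching the set_src_lang_special_tokens logic above.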
| 659 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[List[List[str]]] , SCREAMING_SNAKE_CASE__ : List[List[str]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=SCREAMING_SNAKE_CASE__ , hypotheses=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ )
}
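# A minimal, self-contained sketch (not part of the metric above) of the
# sentence-level GLEU described in the docstring: count matching n-grams, take
# precision and recall over the n-gram totals, and return their minimum.
# The helper names below are illustrative and do not come from `nltk`.
from collections import Counter
def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts
def _sentence_gleu(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    overlap = sum((hyp & ref).values())  # n-grams present in both, with multiplicity
    precision = overlap / max(sum(hyp.values()), 1)
    recall = overlap / max(sum(ref.values()), 1)
    return min(precision, recall)  # GLEU is the minimum of the two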
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
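            # Make each attention mask a contiguous prefix: tokens before the
            # random start index stay visible, everything after is masked out,
            # so every example attends to at least one token.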
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _a ( unittest.TestCase ):
a_ : Dict = StableDiffusionLDMaDPipeline
a_ : int = TEXT_TO_IMAGE_PARAMS
a_ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
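        # Note: the autoencoder runs on 6 channels rather than the usual 3 so
        # that RGB and depth are encoded jointly, which is what makes this an
        # LDM3D-style pipeline rather than plain Stable Diffusion.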
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1]
lowerCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ = np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
lowerCamelCase__ = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * [inputs['prompt']]
# forward
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ = depth_slice_a[0, -3:, -1]
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * [inputs.pop('prompt' )]
lowerCamelCase__ = ldmad_pipe.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
lowerCamelCase__ = text_inputs['input_ids'].to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.text_encoder(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = prompt_embeds
# forward
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
        lowerCamelCase__ = rgb_slice_b[0, -3:, -3:, -1]
        lowerCamelCase__ = depth_slice_b[0, -3:, -1]
        # compare the slices from the first and second forward pass
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'french fries'
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ , negative_prompt=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1]
lowerCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ = np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
lowerCamelCase__ = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]="cpu" , SCREAMING_SNAKE_CASE__ : int=torch.floataa , SCREAMING_SNAKE_CASE__ : Tuple=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
lowerCamelCase__ = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
lowerCamelCase__ = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]="cpu" , SCREAMING_SNAKE_CASE__ : Dict=torch.floataa , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.49_55_86
lowerCamelCase__ = 0.33_79_55_15
lowerCamelCase__ = 1_12.4_85_18
lowerCamelCase__ = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.4_19_41_27
lowerCamelCase__ = 0.35_37_55_86
lowerCamelCase__ = 0.5_63_85_02
lowerCamelCase__ = 0.34_68_61_03
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
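        # Funnel assigns the classification token its own segment id (the
        # `cls_token_type_id = 2` class attribute) instead of folding it into segment 0.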
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
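# Illustrative call (hypothetical data; the masked definitions above correspond,
# in order, to data gathering, per-column scoring, aggregation, and a driver):
# source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
# weights = [0, 0, 1]  # 0: lower raw value scores higher, 1: higher raw value scores higher
# The driver normalises each column to [0, 1], sums the per-column scores, and
# appends the total as a final score to every row of `source_data`.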
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
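# The check below two-colors the graph with a DFS and then verifies that no edge
# joins two vertices of the same color, which is equivalent to bipartiteness.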
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 659 | 0 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]="" , SCREAMING_SNAKE_CASE__ : Dict="train" ):
assert os.path.isdir(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
lowerCamelCase__ = os.listdir(SCREAMING_SNAKE_CASE__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE__ )
def __len__( self : Union[str, Any] ):
return len(self.documents )
def __getitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.documents[idx]
lowerCamelCase__ = document_path.split('/' )[-1]
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as source:
lowerCamelCase__ = source.read()
lowerCamelCase__ , lowerCamelCase__ = process_story(SCREAMING_SNAKE_CASE__ )
return document_name, story_lines, summary_lines
def snake_case ( _a: List[Any] )-> int:
'''simple docstring'''
lowerCamelCase__ = list(filter(lambda _a : len(_a ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
    # for some unknown reason some lines are missing a period; add it
lowerCamelCase__ = [_add_missing_period(_a ) for line in nonempty_lines]
# gather article lines
lowerCamelCase__ = []
lowerCamelCase__ = deque(_a )
while True:
try:
lowerCamelCase__ = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_a )
except IndexError:
            # if "@highlight" is absent from the file we pop every element
            # until the deque is empty, which eventually raises IndexError.
return story_lines, []
# gather summary lines
lowerCamelCase__ = list(filter(lambda _a : not t.startswith('@highlight' ) , _a ) )
return story_lines, summary_lines
def snake_case ( _a: Tuple )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case ( _a: Optional[int] , _a: int , _a: Optional[Any] )-> str:
'''simple docstring'''
if len(_a ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_a )) )
return sequence
def snake_case ( _a: str , _a: List[Any] )-> int:
'''simple docstring'''
lowerCamelCase__ = torch.ones_like(_a )
lowerCamelCase__ = sequence == pad_token_id
lowerCamelCase__ = 0
return mask
def snake_case ( _a: Any , _a: Tuple , _a: Optional[int] )-> int:
'''simple docstring'''
lowerCamelCase__ = [tokenizer.encode(_a ) for line in story_lines]
lowerCamelCase__ = [token for sentence in story_lines_token_ids for token in sentence]
lowerCamelCase__ = [tokenizer.encode(_a ) for line in summary_lines]
lowerCamelCase__ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case ( _a: int , _a: str )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for sequence in batch:
lowerCamelCase__ = -1
lowerCamelCase__ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_a )
return torch.tensor(_a )
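# Illustrative flow (the masked helpers above correspond, in order, to story
# processing, period fixing, truncate-or-pad, mask building, encoding, and
# token-type-id computation; the sample input below is hypothetical):
# raw = "First sentence\n@highlight\nSummary line"
# story_lines, summary_lines = process_story(raw)
# -> (["First sentence."], ["Summary line."])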
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
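# Minimal usage sketch for the open-addressing map above (`_a` is the map class).
# Deleted slots keep the `_deleted` tombstone so probe chains stay intact until
# a resize rebuilds the table from scratch.
if __name__ == "__main__":
    hash_map = _a()
    for i in range(10):
        hash_map[str(i)] = i  # passing the 0.75 load factor triggers _size_up
    del hash_map["3"]  # the slot holds the tombstone, not None
    assert "3" not in hash_map and hash_map["4"] == 4
    print(hash_map)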
| 659 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Dict = OpenAIGPTTokenizer
a_ : str = OpenAIGPTTokenizerFast
a_ : Tuple = True
a_ : Any = False
def _UpperCamelCase ( self : List[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
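        # Toy BPE merge table: each rule fuses the two listed symbols, and `</w>`
        # marks a symbol that ends a word.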
lowerCamelCase__ = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
return "lower newer", "lower newer"
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase__ = 'lower'
lowerCamelCase__ = ['low', 'er</w>']
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokens + ['<unk>']
lowerCamelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# Simple input
lowerCamelCase__ = 'This is a simple input'
lowerCamelCase__ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ = ('This is a simple input', 'This is a pair')
lowerCamelCase__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
def _UpperCamelCase ( self : str ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ ):
pass
| 700 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
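# Complexity note: the plain recursive version above is exponential in `target`;
# both the memoised variant and the bottom-up table run in O(len(array) * target)
# time, with O(target) extra space for the DP array.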
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 659 | 0 |
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowerCamelCase__ = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(SCREAMING_SNAKE_CASE__ )
from datasets import load_dataset
lowerCamelCase__ = load_dataset('nielsr/rvlcdip-demo' )
lowerCamelCase__ = dataset['train'][0]['image'].convert('RGB' )
lowerCamelCase__ = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = torch.Size((1, 16) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=SCREAMING_SNAKE_CASE__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 702 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
        # for weight 0 the criterion is a cost: score is 1 - normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
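# Worked example (hypothetical data; `weighted_score` stands in for the
# entry-point function directly above, whose original name is obfuscated):
# columns: [price, rating]; weights: price is a cost (0), rating a benefit (1)
# weighted_score([[20.0, 4.0], [40.0, 3.0]], [0, 1])
# -> [[20.0, 4.0, 2.0], [40.0, 3.0, 0.0]]
# The cheaper, higher-rated row earns the maximum combined score of 2.0.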
| 659 | 0 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_snake_case = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_snake_case = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
_snake_case = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False ):
lowerCamelCase__ = spearmanr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["PerceiverFeatureExtractor"]
_snake_case = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
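# Usage sketch (assumes a working diffusers install with torch; the model id
# and call signature follow the public Shap-E checkpoint docs):
# import torch
# from diffusers import ShapEPipeline
# pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
# images = pipe("a shark", guidance_scale=15.0).images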
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_snake_case = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
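# Note: _LazyModule defers the heavy torch/flax imports until an attribute is
# first accessed, so `import transformers` stays cheap. Illustratively,
# `from transformers import GPTNeoModel` is what first triggers the real
# modeling_gpt_neo import.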
| 705 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
    ] # the closed (already-expanded) cells grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
    ] # the action grid: records which move reached each cell
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
    lowerCamelCase__ = g + heuristic[x][y] # f = g + h: cost so far plus heuristic estimate to the goal
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
    lowerCamelCase__ = False # flag set if we cannot expand any further
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
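# Note: the heuristic above is the Manhattan distance, which never
# overestimates the true cost on a 4-connected grid with unit step cost,
# so the search returns a shortest path whenever one exists. The +99
# penalty only marks obstacle cells, which the expansion loop skips anyway.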
| 659 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=SCREAMING_SNAKE_CASE_ ):
a_ : int = ['keras_nlp']
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
requires_backends(self , ['keras_nlp'] )
| 706 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = RoCBertTokenizer
a_ : Optional[int] = None
a_ : Any = False
a_ : Union[str, Any] = True
a_ : str = filter_non_english
def _UpperCamelCase ( self : List[Any] ):
super().setUp()
lowerCamelCase__ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
lowerCamelCase__ = {}
lowerCamelCase__ = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = i
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase__ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self : List[Any] ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self : Optional[Any] ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self : str ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
lowerCamelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _UpperCamelCase ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
lowerCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = ['的', '人', '有']
lowerCamelCase__ = ''.join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = True
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase__ = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.encode('你好' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode('你是谁' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = '你好,你是谁'
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 707 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
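# The BFS-based augmenting-path search makes this the Edmonds-Karp variant
# of Ford-Fulkerson, with O(V * E^2) worst-case complexity. For the capacity
# matrix above (the classic CLRS example network) the printed max flow is 23.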
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def snake_case ( _a: list[int] , _a: int , _a: int = 0 , _a: int = -1 )-> int:
'''simple docstring'''
if hi < 0:
lowerCamelCase__ = len(_a )
while lo < hi:
lowerCamelCase__ = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCamelCase__ = mid + 1
else:
lowerCamelCase__ = mid
return lo
def snake_case ( _a: list[int] , _a: int , _a: int = 0 , _a: int = -1 )-> int:
'''simple docstring'''
if hi < 0:
lowerCamelCase__ = len(_a )
while lo < hi:
lowerCamelCase__ = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCamelCase__ = mid + 1
else:
lowerCamelCase__ = mid
return lo
def snake_case ( _a: list[int] , _a: int , _a: int = 0 , _a: int = -1 )-> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(_a , _a , _a , _a ) , _a )
def snake_case ( _a: list[int] , _a: int , _a: int = 0 , _a: int = -1 )-> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(_a , _a , _a , _a ) , _a )
def snake_case ( _a: list[int] , _a: int )-> int | None:
'''simple docstring'''
lowerCamelCase__ = 0
lowerCamelCase__ = len(_a ) - 1
while left <= right:
lowerCamelCase__ = left + (right - left) // 2
lowerCamelCase__ = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCamelCase__ = midpoint - 1
else:
lowerCamelCase__ = midpoint + 1
return None
def snake_case ( _a: list[int] , _a: int )-> int | None:
'''simple docstring'''
lowerCamelCase__ = bisect.bisect_left(_a , _a )
if index != len(_a ) and sorted_collection[index] == item:
return index
return None
def snake_case ( _a: list[int] , _a: int , _a: int , _a: int )-> int | None:
'''simple docstring'''
if right < left:
return None
lowerCamelCase__ = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_a , _a , _a , midpoint - 1 )
else:
return binary_search_by_recursion(_a , _a , midpoint + 1 , _a )
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = sorted(int(item) for item in user_input.split(","))
_snake_case = int(input("Enter a single number to be found in the list:\n"))
_snake_case = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
    # note: args.batch_size is actually num_return_sequences, so the DataLoader batch size stays 1
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
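# Illustrative invocation (flag names inferred from the HumanEvalArguments
# attributes accessed above; actual names and defaults may differ):
# accelerate launch human_eval.py --model_ckpt <checkpoint> \
#     --num_tasks 16 --batch_size 10 --n_samples 200 --output_file eval.json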
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
    ] # the closed (already-expanded) cells grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
    ] # the action grid: records which move reached each cell
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
    lowerCamelCase__ = g + heuristic[x][y] # f = g + h: cost so far plus heuristic estimate to the goal
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
    lowerCamelCase__ = False # flag set if we cannot expand any further
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
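# Output format: line i of the evaluation set holds a question, and line i of
# the gold file holds the tab-separated titles of its positive passages,
# e.g. "Title A\tTitle B".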
| 659 | 0 |
"""simple docstring"""
import random
def snake_case ( _a: int , _a: float , _a: bool = False )-> dict:
'''simple docstring'''
lowerCamelCase__ = {i: [] for i in range(_a )}
    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(_a )
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the given probability
for i in range(_a ):
for j in range(i + 1 , _a ):
if random.random() < probability:
graph[i].append(_a )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_a )
return graph
def snake_case ( _a: int )-> dict:
'''simple docstring'''
return {
i: [j for j in range(_a ) if i != j] for i in range(_a )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
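# Illustrative: complete_graph(3) -> {0: [1, 2], 1: [0, 2], 2: [0, 1]};
# the random generator above short-circuits to the same result whenever
# probability >= 1.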
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
import heapq
import sys
import numpy as np
_snake_case = tuple[int, int]
class _a :
def __init__( self : List[Any] ):
lowerCamelCase__ = []
lowerCamelCase__ = set()
def _UpperCamelCase ( self : List[str] ):
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def _UpperCamelCase ( self : int ):
return len(self.elements ) == 0
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE__ )
else:
# update
# print("update", item)
lowerCamelCase__ = []
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _UpperCamelCase ( self : str ):
return self.elements[0][1]
def _UpperCamelCase ( self : int ):
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE__ )
return (priority, item)
def snake_case ( _a: TPos , _a: TPos )-> Any:
'''simple docstring'''
lowerCamelCase__ = np.array(_a )
lowerCamelCase__ = np.array(_a )
return np.linalg.norm(a - b )
def snake_case ( _a: TPos , _a: TPos )-> Optional[Any]:
'''simple docstring'''
return consistent_heuristic(_a , _a ) // t
def snake_case ( _a: TPos , _a: TPos )-> Any:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( _a: TPos , _a: int , _a: TPos , _a: dict[TPos, float] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = g_function[start] + Wa * heuristics[i](_a , _a )
return ans
def snake_case ( _a: Any , _a: List[Any] , _a: List[Any] )-> str:
'''simple docstring'''
lowerCamelCase__ = np.chararray((n, n) )
for i in range(_a ):
for j in range(_a ):
lowerCamelCase__ = '*'
for i in range(_a ):
for j in range(_a ):
if (j, (n - 1) - i) in blocks:
lowerCamelCase__ = '#'
lowerCamelCase__ = '-'
lowerCamelCase__ = back_pointer[goal]
while x != start:
((lowerCamelCase__) , (lowerCamelCase__)) = x
# print(x)
lowerCamelCase__ = '-'
lowerCamelCase__ = back_pointer[x]
lowerCamelCase__ = '-'
for i in range(_a ):
for j in range(_a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCamelCase__ = back_pointer[goal]
while x != start:
print(_a , end=' ' )
lowerCamelCase__ = back_pointer[x]
print(_a )
sys.exit()
def snake_case ( _a: TPos )-> str:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( _a: Dict , _a: str , _a: List[str] , _a: Tuple , _a: Optional[Any] , _a: str , _a: Dict , _a: Optional[int] , )-> Any:
'''simple docstring'''
for itera in range(_a ):
open_list[itera].remove_element(_a )
# print("s", s)
# print("j", j)
((lowerCamelCase__) , (lowerCamelCase__)) = s
lowerCamelCase__ = (x - 1, y)
lowerCamelCase__ = (x + 1, y)
lowerCamelCase__ = (x, y + 1)
lowerCamelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_a )
lowerCamelCase__ = -1
lowerCamelCase__ = float('inf' )
if valid(_a ) and g_function[neighbours] > g_function[s] + 1:
lowerCamelCase__ = g_function[s] + 1
lowerCamelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(_a , key(_a , 0 , _a , _a ) )
if neighbours not in close_list_inad:
for var in range(1 , _a ):
if key(_a , _a , _a , _a ) <= Wa * key(
_a , 0 , _a , _a ):
open_list[j].put(
_a , key(_a , _a , _a , _a ) )
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_snake_case = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_snake_case = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_snake_case = make_common_ground()
_snake_case = blocks_blk
# hyper parameters
_snake_case = 1
_snake_case = 1
_snake_case = 20
_snake_case = 3 # one consistent and two other inconsistent
# start and end destination
_snake_case = (0, 0)
_snake_case = (n - 1, n - 1)
_snake_case = 1
def snake_case ( _a: TPos , _a: TPos , _a: int )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = {start: 0, goal: float('inf' )}
lowerCamelCase__ = {start: -1, goal: -1}
lowerCamelCase__ = []
lowerCamelCase__ = set()
for i in range(_a ):
open_list.append(PriorityQueue() )
open_list[i].put(_a , key(_a , _a , _a , _a ) )
lowerCamelCase__ = []
lowerCamelCase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_a , _a , _a )
else:
lowerCamelCase__ , lowerCamelCase__ = open_list[i].top_show()
visited.add(_a )
expand_state(
_a , _a , _a , _a , _a , _a , _a , _a , )
close_list_inad.append(_a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_a , _a , _a )
else:
lowerCamelCase__ = open_list[0].top_show()
visited.add(_a )
expand_state(
_a , 0 , _a , _a , _a , _a , _a , _a , )
close_list_anchor.append(_a )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_a ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
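# This mirrors the shared multi-heuristic A* scheme: one consistent anchor
# heuristic (open_list[0]) plus inadmissible ones whose expansions are
# bounded by the W1/W2 inflation factors, in the spirit of Aine et al.,
# "Multi-Heuristic A*" (IJRR 2016). Stated as an assumption about the code's
# provenance, not a citation the file itself makes.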
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
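# Usage sketch (assumes the upstream transformers names for this config,
# shown for illustration; the class above has an obfuscated name):
# from transformers import Swinv2Config, Swinv2Model
# config = Swinv2Config(image_size=256, embed_dim=96, window_size=8)
# model = Swinv2Model(config)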
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Dict=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = 13
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 99
lowerCamelCase__ = 3_84
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 37
lowerCamelCase__ = 'gelu'
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_12
lowerCamelCase__ = 16
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = 1_28
lowerCamelCase__ = 2
lowerCamelCase__ = 9
lowerCamelCase__ = 1
lowerCamelCase__ = None
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFConvBertModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = TFConvBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFConvBertForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = TFConvBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFConvBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFConvBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ : Tuple = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Union[str, Any] = False
a_ : Dict = False
a_ : str = False
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = TFConvBertModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : int ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
lowerCamelCase__ = True
if hasattr(SCREAMING_SNAKE_CASE__ , 'use_cache' ):
lowerCamelCase__ = True
lowerCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
lowerCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = len(model(SCREAMING_SNAKE_CASE__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ , saved_model=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'saved_model' , '1' )
lowerCamelCase__ = tf.keras.models.load_model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
if self.is_encoder_decoder:
lowerCamelCase__ = outputs['encoder_hidden_states']
lowerCamelCase__ = outputs['encoder_attentions']
else:
lowerCamelCase__ = outputs['hidden_states']
lowerCamelCase__ = outputs['attentions']
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
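            # ConvBERT's mixed-attention block hands half of the configured heads to the
            # span-based convolution branch (head_ratio defaults to 2), hence the "/ 2" below.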
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
lowerCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
def check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
self.assertEqual(out_len % 2 , 0 )
lowerCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
if self.is_encoder_decoder:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
@require_tf
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = [1, 6, 7_68]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
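# A minimal standalone version of the integration check above, assuming TensorFlow and
# network access to the "YituTech/conv-bert-base" checkpoint:
#
#     import tensorflow as tf
#     from transformers import TFConvBertModel
#     model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     assert output.shape == (1, 6, 768)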
| 712 |
"""simple docstring"""
def factorial(num: int) -> int:
    '''Return num! computed iteratively.'''
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    '''Return the sum of the decimal digits of number.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    '''Project Euler 20: return the sum of the digits in num! (defaults to 100!).'''
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
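# Quick sanity check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so
# solution(10) == 27; Project Euler 20 itself asks for num=100 (answer: 648).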
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    '''Perform a linear search over array[left:right]; return -1 if target is absent.'''
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    '''Iterative ternary search over a sorted list; return the index of target or -1.'''
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    '''Recursive ternary search over a sorted list; return the index of target or -1.'''
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"""Iterative search: {target} found at position: {result1}""")
        print(f"""Recursive search: {target} found at position: {result2}""")
    else:
        print("Not found")
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''Compute the MinHash of a token list; return None for inputs that are too short.'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    '''Tokenize a code string by splitting on non-alphanumeric characters.'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
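# Example: get_tokens("def foo(): return 1") yields {"def", "foo", "return", "1"},
# since NON_ALPHA splits on every character outside [A-Za-z_0-9] and empty
# fragments are discarded.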
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        '''Add a key to the LSH index and record any near-duplicate cluster it falls into.'''
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    '''Return ((index, repo_name, path), MinHash) for one dataset element, or None.'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    '''Find duplicate clusters: MinHash every file, then group near-duplicates via LSH.'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    '''Compute the Jaccard similarity of the token sets of two code strings.'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''Keep one representative ("extreme") per group of near-duplicates in a cluster.'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset: Type[Dataset], jaccard_threshold: float):
    '''Run _find_cluster_extremes_shared in parallel, sharing the dataset as a global.'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''Remove near-duplicates, keeping one extreme per cluster; return the filtered dataset and the clusters.'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(F'Original dataset size: {len(dataset)}')
    print(F'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(F'Files in duplicate cluster: {len(duplicate_indices)}')
    print(F'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(F'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
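# A minimal usage sketch, assuming a `datasets` Dataset with "content", "repo_name"
# and "path" columns:
#
#     from datasets import load_dataset
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#     print(len(ds), "->", len(ds_dedup))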
| 659 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = "▁"
_snake_case = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_snake_case = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_snake_case = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
_snake_case = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] = ['input_ids', 'attention_mask']
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Tuple="<s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : str="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<unk>" , SCREAMING_SNAKE_CASE__ : str="m2m100" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , SCREAMING_SNAKE_CASE__ : int=8 , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = language_codes
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
lowerCamelCase__ = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(SCREAMING_SNAKE_CASE__ )
for lang_code in fairseq_language_code
if self.get_lang_token(SCREAMING_SNAKE_CASE__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , language_codes=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = load_json(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ = spm_file
lowerCamelCase__ = load_spm(SCREAMING_SNAKE_CASE__ , self.sp_model_kwargs )
lowerCamelCase__ = len(self.encoder )
lowerCamelCase__ = {
self.get_lang_token(SCREAMING_SNAKE_CASE__ ): self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE__ )
}
lowerCamelCase__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = {v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase__ = src_lang if src_lang is not None else 'en'
lowerCamelCase__ = tgt_lang
lowerCamelCase__ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase__ = num_madeup_words
@property
def _UpperCamelCase ( self : Union[str, Any] ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _UpperCamelCase ( self : List[str] ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder[self.unk_token] )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = []
lowerCamelCase__ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
lowerCamelCase__ = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [1] * len(self.prefix_tokens )
lowerCamelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE__ )) + suffix_ones
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE__ )) + ([0] * len(SCREAMING_SNAKE_CASE__ )) + suffix_ones
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase__ = {}
lowerCamelCase__ = load_spm(self.spm_file , self.sp_model_kwargs )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = Path(SCREAMING_SNAKE_CASE__ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
lowerCamelCase__ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
lowerCamelCase__ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , SCREAMING_SNAKE_CASE__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.spm_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (str(SCREAMING_SNAKE_CASE__ ), str(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "en" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "ro" , **SCREAMING_SNAKE_CASE__ : str , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_lang_id(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : Optional[int] ):
self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : Tuple ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.get_lang_token(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.lang_token_to_id[lang_token]
lowerCamelCase__ = [self.cur_lang_id]
lowerCamelCase__ = [self.eos_token_id]
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.get_lang_token(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.lang_token_to_id[lang_token]
lowerCamelCase__ = [self.cur_lang_id]
lowerCamelCase__ = [self.eos_token_id]
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
return self.lang_code_to_token[lang]
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.get_lang_token(SCREAMING_SNAKE_CASE__ )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''Load a SentencePiece model from disk.'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    '''Read a JSON file.'''
    with open(path, 'r') as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    '''Write data to a JSON file.'''
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
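# A minimal usage sketch: M2M100 prefixes the source-language token and appends </s>,
# so an encoded English input begins with the __en__ id (assumes the checkpoint is
# reachable):
#
#     from transformers import M2M100Tokenizer
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
#     batch = tok("Hello world", return_tensors="pt")
#     assert batch["input_ids"][0, 0] == tok.get_lang_id("en")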
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    '''Parse a pytest summary line into (failed, success, time_spent).'''
    expressions = test_results.split(' ')
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
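# Worked example: handle_test_results("== 1 failed, 2 passed in 3.4s ==") returns
# (1, 2, "3.4s"): each count precedes its "failed"/"passed" token, and the trailing
# "==" marker pushes the time one token back from the end.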
def extract_first_line_failure(failures_short_lines):
    '''Map each failing doctest file to the first line of its error message.'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(R'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(hours)}h{int(minutes)}m{int(seconds)}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
                lowerCamelCase__ = F'*Num failures*: {len(job_result["failed"] )}\n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_snake_case = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    '''Rename original SAM checkpoint keys to the Hugging Face naming scheme.'''
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None )
    state_dict.pop('pixel_std' , None )
    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in' )
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0' )
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out' )
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
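# Example: under KEYS_TO_MODIFY_MAPPING the original key
# "image_encoder.blocks.0.norm1.weight" becomes
# "vision_encoder.layers.0.layer_norm1.weight".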
def snake_case ( _a: Dict , _a: Optional[Any] , _a: Union[str, Any] , _a: Optional[Any]="ybelkada/segment-anything" )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = hf_hub_download(_a , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
lowerCamelCase__ = SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase__ = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCamelCase__ = SamConfig(
vision_config=_a , )
elif "sam_vit_h" in model_name:
lowerCamelCase__ = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCamelCase__ = SamConfig(
vision_config=_a , )
lowerCamelCase__ = torch.load(_a , map_location='cpu' )
lowerCamelCase__ = replace_keys(_a )
lowerCamelCase__ = SamImageProcessor()
lowerCamelCase__ = SamProcessor(image_processor=_a )
lowerCamelCase__ = SamModel(_a )
hf_model.load_state_dict(_a )
lowerCamelCase__ = hf_model.to('cuda' )
lowerCamelCase__ = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
lowerCamelCase__ = Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
lowerCamelCase__ = [[[400, 650]]]
lowerCamelCase__ = [[1]]
lowerCamelCase__ = processor(images=np.array(_a ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
lowerCamelCase__ = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
lowerCamelCase__ = ((75, 275, 1725, 850),)
lowerCamelCase__ = processor(images=np.array(_a ) , input_boxes=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
lowerCamelCase__ = [[[400, 650], [800, 650]]]
lowerCamelCase__ = [[1, 1]]
lowerCamelCase__ = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
_snake_case = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_snake_case = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
            raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
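# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of driving the image-to-3D prior pipeline above. The
# public entry point and checkpoint id (`ShapEImg2ImgPipeline`,
# "openai/shap-e-img2img") are assumptions for illustration; in this dump the
# class itself is defined under an obfuscated name.
if __name__ == "__main__":
    import torch
    from diffusers import ShapEImg2ImgPipeline  # assumed public class name
    from diffusers.utils import load_image

    pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")  # assumed repo id
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    image = load_image("corgi.png")  # any RGB image on disk
    # guidance_scale > 1 enables the classifier-free-guidance branch fixed above
    views = pipe(image, guidance_scale=3.0, num_inference_steps=64).images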
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
def update_area_of_max_square(_a: int , _a: int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowerCamelCase__ = update_area_of_max_square(_a , col + 1 )
lowerCamelCase__ = update_area_of_max_square(row + 1 , col + 1 )
lowerCamelCase__ = update_area_of_max_square(row + 1 , _a )
if mat[row][col]:
lowerCamelCase__ = 1 + min([right, diagonal, down] )
lowerCamelCase__ = max(largest_square_area[0] , _a )
return sub_problem_sol
else:
return 0
lowerCamelCase__ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_a: int , _a: int , _a: list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowerCamelCase__ = update_area_of_max_square_using_dp_array(_a , col + 1 , _a )
lowerCamelCase__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _a )
lowerCamelCase__ = update_area_of_max_square_using_dp_array(row + 1 , _a , _a )
if mat[row][col]:
lowerCamelCase__ = 1 + min([right, diagonal, down] )
lowerCamelCase__ = max(largest_square_area[0] , _a )
lowerCamelCase__ = sub_problem_sol
return sub_problem_sol
else:
return 0
lowerCamelCase__ = [0]
lowerCamelCase__ = [[-1] * cols for _ in range(_a )]
update_area_of_max_square_using_dp_array(0 , 0 , _a )
return largest_square_area[0]
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
lowerCamelCase__ = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowerCamelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCamelCase__ = dp_array[row][col + 1]
lowerCamelCase__ = dp_array[row + 1][col + 1]
lowerCamelCase__ = dp_array[row + 1][col]
if mat[row][col] == 1:
lowerCamelCase__ = 1 + min(_a , _a , _a )
lowerCamelCase__ = max(dp_array[row][col] , _a )
else:
lowerCamelCase__ = 0
return largest_square_area
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (cols + 1)
lowerCamelCase__ = [0] * (cols + 1)
lowerCamelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCamelCase__ = current_row[col + 1]
lowerCamelCase__ = next_row[col + 1]
lowerCamelCase__ = next_row[col]
if mat[row][col] == 1:
lowerCamelCase__ = 1 + min(_a , _a , _a )
lowerCamelCase__ = max(current_row[col] , _a )
else:
lowerCamelCase__ = 0
lowerCamelCase__ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
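    # Extra sanity check (added for illustration): a 3x3 matrix whose largest
    # all-ones square is 2x2, so the bottom-up variant must return 2.
    assert largest_square_area_in_matrix_bottom_up(3, 3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]]) == 2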
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
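# --- Illustrative usage sketch (added) ---
# Shows the language-code bookkeeping above in action. `NllbTokenizerFast` is
# the assumed public name for the class defined here (obfuscated in this dump);
# the repo id matches the vocab map at the top of the file. With the default
# legacy_behaviour=False, encoded ids start with the source language code and
# end with </s>, as arranged by `set_src_lang_special_tokens`.
if __name__ == "__main__":
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    ids = tok("UN Chief says there is no military solution in Syria").input_ids
    print(tok.convert_ids_to_tokens(ids[:1]), tok.convert_ids_to_tokens(ids[-1:]))  # ['eng_Latn'] ['</s>']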
| 659 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_snake_case = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_snake_case = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_snake_case = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
lowerCamelCase__ = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
lowerCamelCase__ = evaluate(dataset=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ )
return score
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = 'gptj'
a_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=5_04_00 , SCREAMING_SNAKE_CASE__ : str=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=40_96 , SCREAMING_SNAKE_CASE__ : str=28 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Any="gelu_new" , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=1e-5 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=5_02_56 , SCREAMING_SNAKE_CASE__ : Dict=5_02_56 , SCREAMING_SNAKE_CASE__ : str=False , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : PretrainedConfig , SCREAMING_SNAKE_CASE__ : str = "default" , SCREAMING_SNAKE_CASE__ : List[PatchingSpec] = None , SCREAMING_SNAKE_CASE__ : bool = False , ):
super().__init__(SCREAMING_SNAKE_CASE__ , task=SCREAMING_SNAKE_CASE__ , patching_specs=SCREAMING_SNAKE_CASE__ , use_past=SCREAMING_SNAKE_CASE__ )
if not getattr(self._config , 'pad_token_id' , SCREAMING_SNAKE_CASE__ ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='inputs' )
lowerCamelCase__ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowerCamelCase__ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _UpperCamelCase ( self : Optional[int] ):
return self._config.n_layer
@property
def _UpperCamelCase ( self : Dict ):
return self._config.n_head
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ):
lowerCamelCase__ = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        # We need to order the inputs in the way they appear in the forward()
lowerCamelCase__ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs['attention_mask']
if self.use_past:
lowerCamelCase__ = ordered_inputs['attention_mask'].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
return ordered_inputs
@property
def _UpperCamelCase ( self : List[str] ):
return 13
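# --- Illustrative sketch (added): exercising the ONNX config above ---
# `generate_dummy_inputs` builds one (key, value) tensor pair per layer with
# past length seq_length + 2, then widens the attention mask to match.
# `GPTJConfig` / `GPTJOnnxConfig` are the assumed un-obfuscated names of the
# two classes defined in this file.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")  # tokenizer files only
    cfg = GPTJConfig(n_layer=2, n_head=4, n_embd=32, n_positions=128)
    onnx_cfg = GPTJOnnxConfig(cfg, use_past=True)
    dummy = onnx_cfg.generate_dummy_inputs(tok, batch_size=2, seq_length=5, framework=TensorType.PYTORCH)
    print(len(dummy["past_key_values"]))  # 2, one (key, value) pair per layer
    print(dummy["attention_mask"].shape)  # torch.Size([2, 12]): 5 tokens + past length 7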
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import operator
def snake_case ( _a: list , _a: bool = False , _a: list | None = None )-> list:
'''simple docstring'''
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(_a ):
if _operator(_a , sublist[-1] ):
sublist.append(_a )
arr.pop(_a )
# merging sublist into solution list
if not solution:
solution.extend(_a )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(_a ):
if not _operator(_a , _a ):
solution.insert(_a , _a )
break
else:
solution.append(_a )
strand_sort(_a , _a , _a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
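    # Worked trace (added for illustration): on [4, 3, 5, 1, 2] the first pass
    # strands out the increasing run [4, 5]; the second strands [3] and merges
    # it in; the third strands [1, 2], yielding [1, 2, 3, 4, 5].
    print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]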
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
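# --- Illustrative usage sketch (added) ---
# Unlike BERT, Funnel gives the [CLS] position its own token type id
# (cls_token_type_id = 2 above). `FunnelTokenizerFast` is the assumed public
# name of the class defined here, and the token counts in the expected output
# assume "hello world" splits into two wordpieces and "hi" into one.
if __name__ == "__main__":
    from transformers import FunnelTokenizerFast

    tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    ids_b = tok.convert_tokens_to_ids(tok.tokenize("hi"))
    print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))
    # [2, 0, 0, 0, 1, 1]: <cls> gets type 2, first segment + <sep> 0, second segment + <sep> 1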
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def snake_case ( _a: Matrix , _a: Matrix )-> Matrix:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = [[0 for _ in range(size + 1 )] for _ in range(_a )]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for row in range(_a ):
for col in range(_a ):
lowerCamelCase__ = matrix[row][col]
lowerCamelCase__ = vector[row][0]
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while row < size and col < size:
# pivoting
lowerCamelCase__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_a , _a ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCamelCase__ , lowerCamelCase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _a ):
lowerCamelCase__ = augmented[rowa][col] / augmented[row][col]
lowerCamelCase__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _a ):
for row in range(_a ):
lowerCamelCase__ = augmented[row][col] / augmented[col][col]
for cola in range(_a , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_a )
]
def snake_case ( _a: list[int] )-> Callable[[int], int]:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = [[0 for _ in range(_a )] for _ in range(_a )]
lowerCamelCase__ = [[0] for _ in range(_a )]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for x_val, y_val in enumerate(_a ):
for col in range(_a ):
lowerCamelCase__ = (x_val + 1) ** (size - col - 1)
lowerCamelCase__ = y_val
lowerCamelCase__ = solve(_a , _a )
def interpolated_func(_a: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_a ) )
return interpolated_func
def snake_case ( _a: int )-> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def snake_case ( _a: Callable[[int], int] = question_function , _a: int = 10 )-> int:
'''simple docstring'''
lowerCamelCase__ = [func(_a ) for x_val in range(1 , order + 1 )]
lowerCamelCase__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCamelCase__ = 0
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for poly in polynomials:
lowerCamelCase__ = 1
while func(_a ) == poly(_a ):
x_val += 1
ret += poly(_a )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
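# Illustrative counterexample (added): a triangle is an odd cycle, the
# canonical non-bipartite graph, so the same check prints False for it.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False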
| 659 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _a :
a_ : List[str]
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default='Translation' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[int] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _UpperCamelCase ( self : str ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class _a :
a_ : Optional[List] = None
a_ : Optional[int] = None
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default='TranslationVariableLanguages' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase__ = len(self.languages ) if self.languages else None
def __call__( self : str ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = set(self.languages )
if self.languages and set(SCREAMING_SNAKE_CASE__ ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(SCREAMING_SNAKE_CASE__ ) - lang_set ) )}) are not in valid set ({", ".join(SCREAMING_SNAKE_CASE__ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase__ = []
for lang, text in translation_dict.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase__ , lowerCamelCase__ = zip(*sorted(SCREAMING_SNAKE_CASE__ ) )
return {"language": languages, "translation": translations}
def _UpperCamelCase ( self : int ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
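# --- Illustrative usage sketch (added) ---
# `TranslationVariableLanguages` is the assumed un-obfuscated name of the
# second class above; `encode_example` is the assumed name of its encoding
# method. Multi-reference translations are flattened into parallel
# language/translation lists sorted by language code.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
    # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}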
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
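# --- Illustrative usage (added) ---
# The map behaves like a dict and resizes itself around the load factor.
# `HashMap` is the assumed un-obfuscated name of the class above.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8)
    for i in range(20):  # crosses the 0.75 load factor, triggering _size_up()
        hm[i] = i * i
    assert len(hm) == 20 and hm[7] == 49
    del hm[7]  # tombstoned with _deleted so probe chains stay intact
    assert 7 not in hm and len(hm) == 19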
| 659 | 0 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
A__ : Union[str, Any] = False
try:
A__ : Optional[int] = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = [] )-> Any:
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Optional[Any] = choices
UpperCAmelCase__ : Optional[int] = prompt
if sys.platform == "win32":
UpperCAmelCase__ : List[Any] = "*"
else:
UpperCAmelCase__ : Optional[int] = "➔ "
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "" )-> Any:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __UpperCamelCase )
else:
forceWrite(self.choices[index] , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if index == self.position:
forceWrite(F" {self.arrow_char} " )
self.write_choice(__UpperCamelCase )
else:
forceWrite(F" {self.choices[index]}" )
reset_cursor()
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = 1 )-> List[str]:
UpperCAmelCase__ : Dict = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__UpperCamelCase )
move_cursor(__UpperCamelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self )-> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self )-> Optional[int]:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self )-> Tuple:
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self )-> str:
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Any = int(chr(self.current_selection ) )
UpperCAmelCase__ : Optional[int] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __UpperCamelCase )
else:
return
else:
return
def lowerCAmelCase__ ( self , __UpperCamelCase = 0 )-> Tuple:
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase__ : Optional[Any] = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__UpperCamelCase )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase__ : str = int(builtins.input() )
except ValueError:
UpperCAmelCase__ : List[Any] = default_choice
else:
UpperCAmelCase__ : Optional[int] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__UpperCamelCase , "\n" )
return choice
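# --- Illustrative usage sketch (added) ---
# This menu is what `accelerate config` drives. It needs an interactive TTY
# (arrow/number keys), so treat this as a sketch; `BulletMenu` and `run` are
# the assumed un-obfuscated names of the class and its entry method above.
if __name__ == "__main__":
    menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
    choice = menu.run(default_choice=0)  # blocks until Enter; returns the selected index
    print(f"selected index: {choice}")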
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
        UpperCAmelCase__ : Tuple = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
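    # Quick equivalence check (added): both counters must agree with the
    # reference count of one-bits given by bin(n).count("1").
    for n in (0, 1, 25, 37, 58, 4096):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")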
| 660 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A__ : Optional[int] = 250_004
A__ : Tuple = 250_020
@require_sentencepiece
@require_tokenizers
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = MBartTokenizer
_A = MBartTokenizerFast
_A = True
_A = True
def lowerCAmelCase__ ( self )-> Any:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Dict = MBartTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = MBartTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase__ : str = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCAmelCase__ ( self )-> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ : str = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : str = tokenizer_r.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : str = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(__UpperCamelCase )
                # Checks it saves with the same files
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Optional[Any] = tokenizer_r.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : int = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Any = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = 'facebook/mbart-large-en-ro'
_A = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_A = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_A = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
def lowerCAmelCase__ ( cls )-> List[Any]:
UpperCAmelCase__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
UpperCAmelCase__ : Any = 1
return cls
def lowerCAmelCase__ ( self )-> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
UpperCAmelCase__ : Dict = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
UpperCAmelCase__ : List[str] = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = 10
UpperCAmelCase__ : Optional[Any] = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __UpperCamelCase )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_26, 25_00_01] )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Tuple = MBartTokenizer.from_pretrained(__UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCamelCase )
@require_torch
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="pt" )
UpperCAmelCase__ : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase__ : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase__ : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tokenizer(self.src_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase__ : Dict = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase__ : Dict = targets["input_ids"]
UpperCAmelCase__ : List[Any] = shift_tokens_right(__UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 30_34, 2, 25_00_04]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
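# Illustrative sketch (not part of the original test file): the language-code
# mechanics exercised above. MBart encodes source text as `tokens + [eos, src_lang_code]`
# and expects generation to start from the target language code, e.g.:
#
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   tok("A test").input_ids                      # ends with [2, 250004] == [eos, en_XX]
#   tok("A test", text_target="Un test").labels  # ends with [2, 250020] == [eos, ro_RO]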
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
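# Minimal usage sketch outside the test harness (assumes the same tools API
# exercised above; `load_tool` returns a callable tool object):
#
#   tool = load_tool("text-classification")
#   tool.setup()
#   tool("That's quite cool", ["positive", "negative"])  # -> "positive"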
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : Dict = list[tuple[int, int]]
A__ : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A__ : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> List[Any]:
UpperCAmelCase__ : List[str] = pos_x
UpperCAmelCase__ : Any = pos_y
UpperCAmelCase__ : Dict = (pos_y, pos_x)
UpperCAmelCase__ : List[Any] = goal_x
UpperCAmelCase__ : Optional[Any] = goal_y
UpperCAmelCase__ : Optional[Any] = g_cost
UpperCAmelCase__ : Optional[int] = parent
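        # Greedy best-first search: the priority stored below is the heuristic
        # alone (f = h); g_cost is tracked only to compare competing paths.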
UpperCAmelCase__ : List[Any] = self.calculate_heuristic()
def lowerCAmelCase__ ( self )-> float:
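        # Manhattan (L1) distance from the current position to the goal.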
UpperCAmelCase__ : int = abs(self.pos_x - self.goal_x )
UpperCAmelCase__ : List[str] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , __UpperCamelCase )-> bool:
return self.f_cost < other.f_cost
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [self.start]
UpperCAmelCase__ : list[Node] = []
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self )-> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase__ : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase__ : Tuple = True
return self.retrace_path(__UpperCamelCase )
self.closed_nodes.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = self.get_successors(__UpperCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__UpperCamelCase )
else:
# retrieve the best current path
UpperCAmelCase__ : Tuple = self.open_nodes.pop(self.open_nodes.index(__UpperCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__UpperCamelCase )
else:
self.open_nodes.append(__UpperCamelCase )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase__ ( self , __UpperCamelCase )-> list[Node]:
UpperCAmelCase__ : Union[str, Any] = []
for action in delta:
UpperCAmelCase__ : Dict = parent.pos_x + action[1]
UpperCAmelCase__ : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__UpperCamelCase , __UpperCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __UpperCamelCase , ) )
return successors
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Path:
UpperCAmelCase__ : Dict = node
UpperCAmelCase__ : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase__ : List[Any] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
A__ : Union[str, Any] = (0, 0)
A__ : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
A__ : int = GreedyBestFirst(init, goal)
A__ : str = greedy_bf.search()
if path:
for pos_x, pos_y in path:
A__ : int = 2
for elem in grid:
print(elem)
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
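# Point validator (referenced as `_validate_point` by the two distance
# functions); the obfuscation renamed this definition to `a__` as well, so each
# top-level definition in this file shadows the previous one.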
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
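# Illustrative expected results (both distance functions compute the L1 norm):
#   distance([1, 1], [2, 2])     -> 2.0   (|1 - 2| + |1 - 2|)
#   distance([1.5, 1.5], [2, 2]) -> 1.0   (0.5 + 0.5)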
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A__ : Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , *__UpperCamelCase , **__UpperCamelCase )-> None:
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k ± 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
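# Example of the 6k ± 1 trial division above: 29 (= 6 * 5 - 1) survives the loop
# and is prime, while 25 is rejected at i = 5. The next function calls this one
# as `is_prime`, although the obfuscation renamed the definition to `a__`.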
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
    while len(lowerCAmelCase ) < nth:
        if is_prime(lowerCAmelCase ):
            primes.append(lowerCAmelCase )
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['image_processor', 'tokenizer']
_A = 'BlipImageProcessor'
_A = 'AutoTokenizer'
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any:
super().__init__(__UpperCamelCase , __UpperCamelCase )
# add QFormer tokenizer
UpperCAmelCase__ : Optional[Any] = qformer_tokenizer
def __call__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
UpperCAmelCase__ : Dict = BatchFeature()
if text is not None:
UpperCAmelCase__ : List[str] = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
encoding.update(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.qformer_tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase__ : Any = qformer_text_encoding.pop("input_ids" )
UpperCAmelCase__ : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
UpperCAmelCase__ : Tuple = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase )
encoding.update(__UpperCamelCase )
return encoding
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> List[Any]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[str] = self.tokenizer.model_input_names
UpperCAmelCase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
if os.path.isfile(__UpperCamelCase ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
UpperCAmelCase__ : int = os.path.join(__UpperCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__UpperCamelCase )
return super().save_pretrained(__UpperCamelCase , **__UpperCamelCase )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase , subfolder="qformer_tokenizer" )
UpperCAmelCase__ : Tuple = cls._get_arguments_from_pretrained(__UpperCamelCase , **__UpperCamelCase )
args.append(__UpperCamelCase )
return cls(*__UpperCamelCase )
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
A__ : int = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
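        # Fall back to a fixed PRNG key so stepping is reproducible when the
        # caller does not supply one; pass a fresh key per step for stochastic sampling.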
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
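        # Noise is only added for t > 0; the final denoising step (t == 0) is deterministic.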
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
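# Illustrative usage sketch (the obfuscated class above corresponds to
# diffusers' FlaxDDPMScheduler; API assumed from the methods defined here):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, 50)
#   for t in state.timesteps:
#       noise_pred = ...  # model output for the current sample
#       sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)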
| 660 | 1 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : list , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
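    # The optimal block size for jump search is about sqrt(n).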
UpperCAmelCase__ : Union[str, Any] = int(math.floor(math.sqrt(lowerCAmelCase ) ) )
UpperCAmelCase__ : List[Any] = 0
while arr[min(lowerCAmelCase , lowerCAmelCase ) - 1] < x:
UpperCAmelCase__ : List[Any] = step
step += int(math.floor(math.sqrt(lowerCAmelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
UpperCAmelCase__ : Any = prev + 1
if prev == min(lowerCAmelCase , lowerCAmelCase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A__ : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
A__ : Tuple = [int(item) for item in user_input.split(""",""")]
A__ : Tuple = int(input("""Enter the number to be searched:\n"""))
A__ : List[str] = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
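                # Also register every parent directory of this file as a "directory" entry.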
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
A__ : str = """"""
A__ : List[str] = """"""
A__ : Any = """"""
A__ : str = """"""
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
# authorize twitter, initialize tweepy
UpperCAmelCase__ : Dict = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Dict = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase__ : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase__ : str = api.user_timeline(screen_name=lowerCAmelCase , count=200 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
UpperCAmelCase__ : str = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase__ : Union[str, Any] = api.user_timeline(
screen_name=lowerCAmelCase , count=200 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
UpperCAmelCase__ : Optional[Any] = alltweets[-1].id - 1
print(F"...{len(lowerCAmelCase )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase__ : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , "w" ) as f:
UpperCAmelCase__ : str = csv.writer(lowerCAmelCase )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
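    # Normalize the input into a batch of videos: a list of videos, each a list of frames.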
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , )-> int:
UpperCAmelCase__ : Optional[int] = size if size is not None else {"shortest_edge": 18}
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : List[Any] = min_resolution
UpperCAmelCase__ : Any = max_resolution
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : Any = do_center_crop
UpperCAmelCase__ : Optional[int] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_normalize
UpperCAmelCase__ : str = image_mean
UpperCAmelCase__ : Dict = image_std
def lowerCAmelCase__ ( self )-> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
_A = LevitImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = LevitImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self )-> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCAmelCase__ ( self )-> int:
pass
def lowerCAmelCase__ ( self )-> List[str]:
# Initialize image_processing
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase__ : str = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase__ ( self )-> Optional[Any]:
# Initialize image_processing
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase__ : Dict = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
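# A toy sketch of the size-dict coercion exercised in the from_dict test
# above: an int `size` becomes a shortest-edge spec when default_to_square
# is False, while an int `crop_size` becomes a square height/width dict.
# This mirrors (but is not) transformers' get_size_dict helper.
def toy_get_size_dict(size, default_to_square=True):
    if isinstance(size, int):
        if default_to_square:
            return {"height": size, "width": size}
        return {"shortest_edge": size}
    return dict(size)
print(toy_get_size_dict(42, default_to_square=False))  # {'shortest_edge': 42}
print(toy_get_size_dict(84))                           # {'height': 84, 'width': 84}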
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
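# An alternative popcount for comparison: Brian Kernighan's trick clears the
# lowest set bit each iteration, so the loop runs once per set bit rather
# than once per binary digit.
def kernighan_count(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count
assert kernighan_count(0b101101) == 4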
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Union[str, Any] = size
# approximate the overall size of segment tree with given value
UpperCAmelCase__ : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase__ : Optional[int] = [0 for i in range(0 , 4 * size )]
UpperCAmelCase__ : Union[str, Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return idx * 2
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return idx * 2 + 1
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
if left_element == right_element:
UpperCAmelCase__ : Optional[int] = a[left_element - 1]
else:
UpperCAmelCase__ : List[Any] = (left_element + right_element) // 2
self.build(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.build(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = max(
self.segment_tree[self.left(__UpperCamelCase )] , self.segment_tree[self.right(__UpperCamelCase )] )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool:
if self.flag[idx] is True:
UpperCAmelCase__ : List[Any] = self.lazy[idx]
UpperCAmelCase__ : str = False
if left_element != right_element:
UpperCAmelCase__ : Optional[Any] = self.lazy[idx]
UpperCAmelCase__ : Dict = self.lazy[idx]
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[str] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase__ : Any = val
if left_element != right_element:
UpperCAmelCase__ : Dict = val
UpperCAmelCase__ : Tuple = val
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Any = True
return True
UpperCAmelCase__ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.update(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = max(
self.segment_tree[self.left(__UpperCamelCase )] , self.segment_tree[self.right(__UpperCamelCase )] )
return True
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int | float:
if self.flag[idx] is True:
UpperCAmelCase__ : Union[str, Any] = self.lazy[idx]
UpperCAmelCase__ : List[str] = False
if left_element != right_element:
UpperCAmelCase__ : Any = self.lazy[idx]
UpperCAmelCase__ : List[Any] = self.lazy[idx]
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Dict = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase__ : Optional[Any] = (left_element + right_element) // 2
UpperCAmelCase__ : str = self.query(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = self.query(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return max(__UpperCamelCase , __UpperCamelCase )
def __str__( self )-> str:
return str([self.query(1 , 1 , self.size , __UpperCamelCase , __UpperCamelCase ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A__ : Optional[Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size : int = 15
    segt : SegmentTree = SegmentTree(size)
    segt.build(1, 1, size, A__)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
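# A brute-force cross-check for the lazy segment tree above: range-assign and
# range-max on a plain list, useful for validating query results on small
# inputs (same 1-based, inclusive index convention as the demo above).
class NaiveRangeMax:
    def __init__(self, a):
        self.a = list(a)
    def update(self, left, right, val):
        for i in range(left - 1, right):
            self.a[i] = val
    def query(self, left, right):
        return max(self.a[left - 1 : right])
naive = NaiveRangeMax([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(naive.query(4, 6))  # 7, matching segt.query(1, 1, size, 4, 6)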
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode : bool , use_xla : bool ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
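# Minimal usage sketch of the decorator factory above (assumes TensorFlow is
# installed): with eager mode off, the wrapped function is compiled via
# tf.function; `experimental_compile` is the pre-TF-2.5 spelling of today's
# `jit_compile` flag.
@run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
def double(x):
    return x * 2
print(double(tf.constant([1, 2, 3])))  # tf.Tensor([2 4 6], shape=(3,), dtype=int32)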
def random_input_ids(batch_size : int , sequence_length : int , vocab_size : int ):
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark ( Benchmark ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
                    if not is_py3nvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A = SpeechTaTokenizer
_A = False
_A = True
def lowerCAmelCase__ ( self )-> str:
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCAmelCase__ : List[str] = SpeechTaTokenizer(SAMPLE_VOCAB )
UpperCAmelCase__ : List[Any] = AddedToken("<mask>" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
UpperCAmelCase__ : Any = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : int = "this is a test"
UpperCAmelCase__ : Tuple = "this is a test"
return input_text, output_text
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=20 , __UpperCamelCase=5 )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_input_output_texts(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
UpperCAmelCase__ : List[Any] = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = "<pad>"
UpperCAmelCase__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(__UpperCamelCase ) , 81 )
def lowerCAmelCase__ ( self )-> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : str = self.get_tokenizers(do_lower_case=__UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase__ : Optional[Any] = tokenizer.vocab_size
UpperCAmelCase__ : int = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase__ : Union[str, Any] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
UpperCAmelCase__ : str = tokenizer.add_tokens(__UpperCamelCase )
UpperCAmelCase__ : str = tokenizer.vocab_size
UpperCAmelCase__ : Dict = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size + len(__UpperCamelCase ) )
UpperCAmelCase__ : str = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase__ : Dict = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
UpperCAmelCase__ : Optional[int] = tokenizer.add_special_tokens(__UpperCamelCase )
UpperCAmelCase__ : List[str] = tokenizer.vocab_size
UpperCAmelCase__ : List[Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size_a + len(__UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase__ ( self )-> Tuple:
pass
def lowerCAmelCase__ ( self )-> List[str]:
pass
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Dict = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__UpperCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCAmelCase__ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
# fmt: off
self.assertListEqual(__UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def lowerCAmelCase__ ( self )-> str:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCAmelCase__ : int = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
UpperCAmelCase__ : str = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__UpperCamelCase , )
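# A toy illustration of the character-level tokenization asserted above:
# each word is split into characters and prefixed with the SentencePiece
# word-boundary symbol U+2581.
SPIECE_UNDERLINE_CHAR = "\u2581"
def char_tokenize(text):
    tokens = []
    for word in text.split():
        tokens.append(SPIECE_UNDERLINE_CHAR)
        tokens.extend(word)
    return tokens
print(char_tokenize("This is a test"))
# ['▁', 'T', 'h', 'i', 's', '▁', 'i', 's', '▁', 'a', '▁', 't', 'e', 's', 't']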
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
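# A minimal sketch of the same lazy-import idea using PEP 562 module-level
# __getattr__ instead of transformers' _LazyModule helper: the submodule is
# imported only on first attribute access (works when this file is part of
# a package).
import importlib
_LAZY_STRUCTURE = {"tokenization_tapex": ["TapexTokenizer"]}
def __getattr__(name):
    for module_name, symbols in _LAZY_STRUCTURE.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")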
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester :
'''simple docstring'''
_A = BlenderbotConfig
_A = {}
_A = 'gelu'
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , )-> List[str]:
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : Tuple = use_labels
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : Tuple = eos_token_id
UpperCAmelCase__ : List[Any] = pad_token_id
UpperCAmelCase__ : Optional[int] = bos_token_id
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : Any = prepare_blenderbot_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Optional[Any] = TFBlenderbotModel(config=__UpperCamelCase ).get_decoder()
UpperCAmelCase__ : Optional[Any] = inputs_dict["input_ids"]
UpperCAmelCase__ : str = input_ids[:1, :]
UpperCAmelCase__ : Tuple = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase__ : Tuple = inputs_dict["head_mask"]
UpperCAmelCase__ : Optional[int] = 1
# first forward pass
UpperCAmelCase__ : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase__ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase__ : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
UpperCAmelCase__ : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
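# Quick demonstration of the default attention-mask rule above (assumes
# TensorFlow is installed): pad positions get 0, real tokens get 1.
_pad_token_id = 0
_demo_ids = tf.constant([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])
print(tf.cast(tf.math.not_equal(_demo_ids, _pad_token_id), tf.int8).numpy())
# [[1 1 1 0 0]
#  [1 1 0 0 0]]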
@require_tf
class TFBlenderbotModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_A = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_A = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A = True
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[str] = TFBlenderbotModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = ['My friends are cool but they eat too many carbs.']
_A = 'facebook/blenderbot-400M-distill'
@cached_property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
UpperCAmelCase__ : str = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , embedding_dim = 7_68 , )-> Union[str, Any]:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def lowerCAmelCase__ ( self , torch_device = None , torch_dtype = None , )-> Any:
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def lowerCAmelCase__ ( self , embeds )-> Union[str, Any]:
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def lowerCAmelCase__ ( self , embeds )-> List[Any]:
        embeds = (embeds * self.std) + self.mean
        return embeds
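# A numpy sketch of the scale/unscale round trip implemented by the two
# methods above: whitening with the stored mean/std is exactly inverted by
# the de-normalization step.
import numpy as np
_mean, _std = np.float32(0.5), np.float32(2.0)
_embeds = np.array([1.0, 3.0, -2.0], dtype=np.float32)
_scaled = (_embeds - _mean) * 1.0 / _std
_restored = (_scaled * _std) + _mean
assert np.allclose(_restored, _embeds)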
| 660 | 1 |