"""simple docstring"""
from importlib import import_module
from .logging import get_logger
_snake_case = get_logger(__name__)
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None ):
lowerCamelCase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = module._original_module if isinstance(SCREAMING_SNAKE_CASE__ , _PatchedModuleObj ) else module
class _a :
a_ : Any = []
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None ):
lowerCamelCase__ = obj
lowerCamelCase__ = target
lowerCamelCase__ = new
lowerCamelCase__ = target.split('.' )[0]
lowerCamelCase__ = {}
lowerCamelCase__ = attrs or []
def __enter__( self : int ):
*lowerCamelCase__ , lowerCamelCase__ = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
lowerCamelCase__ = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(SCREAMING_SNAKE_CASE__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
lowerCamelCase__ = obj_attr
# patch at top level
setattr(self.obj , SCREAMING_SNAKE_CASE__ , _PatchedModuleObj(SCREAMING_SNAKE_CASE__ , attrs=self.attrs ) )
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , attrs=self.attrs ) )
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# finally set the target attribute
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowerCamelCase__ = getattr(import_module('.'.join(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , SCREAMING_SNAKE_CASE__ ) is attr_value:
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE__ )
setattr(self.obj , SCREAMING_SNAKE_CASE__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
lowerCamelCase__ = globals()['__builtins__'][target_attr]
setattr(self.obj , SCREAMING_SNAKE_CASE__ , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int ):
for attr in list(self.original ):
setattr(self.obj , SCREAMING_SNAKE_CASE__ , self.original.pop(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : Any ):
self.__enter__()
self._active_patches.append(self )
def _UpperCamelCase ( self : Tuple ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
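# A minimal usage sketch (editorial addition; `demo_module` and `fake_join`
# are hypothetical stand-ins, not part of the original file). The patch swaps
# the attribute as seen *from* the target object and restores it on exit:
#
#     import os, types
#
#     demo_module = types.ModuleType("demo_module")
#     demo_module.os = os
#
#     def fake_join(*args):
#         return "<patched>"
#
#     with patch_submodule(demo_module, "os.path.join", fake_join):
#         assert demo_module.os.path.join("a", "b") == "<patched>"
#     assert demo_module.os.path.join("a", "b") == os.path.join("a", "b")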
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
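    # Illustrative counterexample (editorial addition): an odd cycle such as a
    # triangle cannot be two-colored, so the check returns False.
    print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))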
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["PerceiverFeatureExtractor"]
_snake_case = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
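if __name__ == "__main__":
    # Minimal usage sketch (editorial addition): the class implements the full
    # MutableMapping protocol, so it behaves like a dict with open addressing.
    hash_map: HashMap[str, int] = HashMap(initial_block_size=4)
    hash_map["a"] = 1
    hash_map["b"] = 2
    hash_map["a"] = 3  # overwriting an existing key keeps len unchanged
    print(hash_map["a"], len(hash_map))  # 3 2
    del hash_map["b"]
    print(list(hash_map))  # ['a']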
"""simple docstring"""
from __future__ import annotations
import numpy as np
def snake_case ( _a: list[float] )-> Optional[Any]:
'''simple docstring'''
return np.maximum(0 , _a )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
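    # Cross-check (editorial addition): the memoized and bottom-up variants
    # must agree with the plain recursion on this input.
    assert (
        combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )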
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def snake_case ( _a: Callable[[int | float], int | float] , _a: int | float , _a: int | float , _a: int = 100 , )-> float:
'''simple docstring'''
lowerCamelCase__ = x_start
lowerCamelCase__ = fnc(_a )
lowerCamelCase__ = 0.0
for _ in range(_a ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
lowerCamelCase__ = (x_end - x_start) / steps + xa
lowerCamelCase__ = fnc(_a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
lowerCamelCase__ = xa
lowerCamelCase__ = fxa
return area
if __name__ == "__main__":
def snake_case ( _a: Union[str, Any] )-> Tuple:
'''simple docstring'''
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
_snake_case = 10
while i <= 10_0000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
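    # Editorial note: each slice contributes abs(fx2 + fx1) * dx / 2, so the
    # loop converges to the geometric area between curve and x-axis (about
    # 312.67 = 938/3 for x^3 + x^2 on [-5, 5]), not the signed integral 250/3.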
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
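if __name__ == "__main__":
    # Minimal usage sketch (editorial addition): weight 0 means "lower is
    # better", weight 1 means "higher is better" for the given column.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))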
"""simple docstring"""
import argparse
import struct
import unittest
class _a :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : bytes ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
lowerCamelCase__ = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : bytes ):
lowerCamelCase__ = B'\x80' + (B'\x00' * (63 - (len(SCREAMING_SNAKE_CASE__ ) + 8) % 64))
lowerCamelCase__ = struct.pack('>Q' , (len(SCREAMING_SNAKE_CASE__ ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self : Dict ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack('>16L' , SCREAMING_SNAKE_CASE__ ) )
# add 48 0-ed integers
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
lowerCamelCase__ = self.ror(SCREAMING_SNAKE_CASE__ , 6 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 11 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0Xf_f_f_f_f_f_f_f) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
lowerCamelCase__ = self.ror(SCREAMING_SNAKE_CASE__ , 2 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 13 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = ''.join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 0Xf_f_f_f_f_f_f_f & (value << (32 - rotations)) | (value >> rotations)
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : List[Any] ):
import hashlib
lowerCamelCase__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(SCREAMING_SNAKE_CASE__ ).hash , hashlib.shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() )
def snake_case ( )-> None:
'''simple docstring'''
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = bytes(_a , 'utf-8' )
print(SHAaaa(_a ).hash )
if __name__ == "__main__":
main()
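# Quick sanity check against the canonical "abc" test vector (editorial
# addition; value from the SHA-2 specification):
#     >>> SHA256(b"abc").hash
#     'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'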
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class _a :
a_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a_ : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
a_ : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Whether to log verbose messages or not.'} , )
a_ : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
a_ : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
a_ : Optional[float] = field(
default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def snake_case ( _a: ModelArguments , _a: TrainingArguments )-> Dict:
'''simple docstring'''
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCamelCase__ = logging.WARNING
if model_args.verbose_logging:
lowerCamelCase__ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowerCamelCase__ = logging.INFO
logger.setLevel(_a )
@dataclass
class _a :
a_ : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
a_ : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
a_ : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
a_ : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a_ : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
a_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
a_ : Optional[float] = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class _a :
a_ : WavaVecaForPreTraining
a_ : WavaVecaFeatureExtractor
a_ : Union[bool, str] = "longest"
a_ : Optional[int] = None
a_ : Optional[int] = None
def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# reformat list to dict and set to pytorch format
lowerCamelCase__ = self.feature_extractor.pad(
SCREAMING_SNAKE_CASE__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
lowerCamelCase__ = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
lowerCamelCase__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowerCamelCase__ = 1
lowerCamelCase__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCamelCase__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=SCREAMING_SNAKE_CASE__ , min_masks=2 , )
return batch
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , **SCREAMING_SNAKE_CASE__ : List[str] ):
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 0
lowerCamelCase__ = max_gumbel_temp
lowerCamelCase__ = min_gumbel_temp
lowerCamelCase__ = gumbel_temp_decay
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : nn.Module , SCREAMING_SNAKE_CASE__ : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
lowerCamelCase__ = self._prepare_inputs(SCREAMING_SNAKE_CASE__ )
if self.use_amp:
with autocast():
lowerCamelCase__ = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = self.compute_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCamelCase__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCamelCase__ = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
lowerCamelCase__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(SCREAMING_SNAKE_CASE__ ).backward()
elif self.use_apex:
with amp.scale_loss(SCREAMING_SNAKE_CASE__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(SCREAMING_SNAKE_CASE__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def snake_case ( )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
configure_logger(_a , _a )
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_a )
def prepare_dataset(_a: List[Any] ):
# check that all files have the correct sampling rate
lowerCamelCase__ , lowerCamelCase__ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowerCamelCase__ = datasets.map(
_a , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
lowerCamelCase__ = vectorized_datasets.filter(
lambda _a : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_a: List[str] ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowerCamelCase__ = vectorized_datasets.map(
_a , batched=_a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCamelCase__ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
lowerCamelCase__ = WavaVecaForPreTraining(_a )
lowerCamelCase__ = DataCollatorForWavaVecaPretraining(model=_a , feature_extractor=_a )
lowerCamelCase__ = WavaVecaPreTrainer(
model=_a , data_collator=_a , args=_a , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=_a , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
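# Example invocation (editorial addition; the checkpoint is a placeholder and
# must use ``do_stable_layer_norm=True`` / ``feat_extract_norm="layer"``, or
# the script raises; all flags come from the dataclasses above):
#
#   python run_pretrain.py \
#       --model_name_or_path="<wav2vec2-checkpoint-with-stable-layer-norm>" \
#       --dataset_name="librispeech_asr" \
#       --dataset_config_name="clean" \
#       --output_dir="./wav2vec2-pretrained" \
#       --do_train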
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
"""simple docstring"""
from PIL import Image
def snake_case ( _a: Image , _a: float )-> Image:
'''simple docstring'''
def brightness(_a: int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(_a )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_snake_case = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
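# Editorial note: for 8-bit images, Image.point() clamps each transformed
# channel value to the 0-255 range, so large positive or negative levels
# saturate toward pure white or black.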
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
"""simple docstring"""
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
def update_area_of_max_square(_a: int , _a: int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowerCamelCase__ = update_area_of_max_square(_a , col + 1 )
lowerCamelCase__ = update_area_of_max_square(row + 1 , col + 1 )
lowerCamelCase__ = update_area_of_max_square(row + 1 , _a )
if mat[row][col]:
lowerCamelCase__ = 1 + min([right, diagonal, down] )
lowerCamelCase__ = max(largest_square_area[0] , _a )
return sub_problem_sol
else:
return 0
lowerCamelCase__ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_a: int , _a: int , _a: list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowerCamelCase__ = update_area_of_max_square_using_dp_array(_a , col + 1 , _a )
lowerCamelCase__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _a )
lowerCamelCase__ = update_area_of_max_square_using_dp_array(row + 1 , _a , _a )
if mat[row][col]:
lowerCamelCase__ = 1 + min([right, diagonal, down] )
lowerCamelCase__ = max(largest_square_area[0] , _a )
lowerCamelCase__ = sub_problem_sol
return sub_problem_sol
else:
return 0
lowerCamelCase__ = [0]
lowerCamelCase__ = [[-1] * cols for _ in range(_a )]
update_area_of_max_square_using_dp_array(0 , 0 , _a )
return largest_square_area[0]
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
lowerCamelCase__ = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowerCamelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCamelCase__ = dp_array[row][col + 1]
lowerCamelCase__ = dp_array[row + 1][col + 1]
lowerCamelCase__ = dp_array[row + 1][col]
if mat[row][col] == 1:
lowerCamelCase__ = 1 + min(_a , _a , _a )
lowerCamelCase__ = max(dp_array[row][col] , _a )
else:
lowerCamelCase__ = 0
return largest_square_area
def snake_case ( _a: int , _a: int , _a: list[list[int]] )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (cols + 1)
lowerCamelCase__ = [0] * (cols + 1)
lowerCamelCase__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowerCamelCase__ = current_row[col + 1]
lowerCamelCase__ = next_row[col + 1]
lowerCamelCase__ = next_row[col]
if mat[row][col] == 1:
lowerCamelCase__ = 1 + min(_a , _a , _a )
lowerCamelCase__ = max(current_row[col] , _a )
else:
lowerCamelCase__ = 0
lowerCamelCase__ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
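    # Cross-check (editorial addition): all four variants return the same
    # maximal side length for this all-ones matrix.
    mat = [[1, 1], [1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(2, 2, mat)
        == largest_square_area_in_matrix_top_down_with_dp(2, 2, mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat)
        == 2
    )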
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import qiskit
def snake_case ( _a: int , _a: int )-> qiskit.result.counts.Counts:
'''simple docstring'''
lowerCamelCase__ = qiskit.Aer.get_backend('aer_simulator' )
lowerCamelCase__ = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
lowerCamelCase__ = qiskit.execute(_a , _a , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(_a )
if __name__ == "__main__":
_snake_case = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
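# Editorial note: this capacity matrix is the classic six-node example from
# CLRS; its maximum flow from node 0 to node 5 is 23.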
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
_snake_case = TypeVar("T")
_snake_case = Union[List[T], Tuple[T, ...]]
_snake_case = Union[T, List[T], Dict[str, T]]
_snake_case = Union[str, bytes, os.PathLike]
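# Illustrative usage (editorial addition; `load` is a hypothetical function):
#
#     def load(path: PathLike, batch: ListLike[int]) -> None: ...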
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
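# Example invocation (editorial addition; the script filename and checkpoint
# are illustrative assumptions, and all flags map to fields read from
# HumanEvalArguments above):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample True \
#       --temperature 0.2 \
#       --batch_size 10 \
#       --n_samples 200 \
#       --HF_ALLOW_CODE_EVAL "1"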
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _a ( SCREAMING_SNAKE_CASE_ ):
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as input_file:
lowerCamelCase__ = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
lowerCamelCase__ = input_file.read()
lowerCamelCase__ = regexp.search(SCREAMING_SNAKE_CASE__ )
return match
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as input_file:
lowerCamelCase__ = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
lowerCamelCase__ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCamelCase__ = regexp.finditer(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = Path('./datasets' )
lowerCamelCase__ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(SCREAMING_SNAKE_CASE__ ) ):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = Path('./datasets' )
lowerCamelCase__ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(SCREAMING_SNAKE_CASE__ ) ):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
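# Example invocation (editorial addition; the script and output file names
# are illustrative assumptions):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set output/biencoder-nq-dev.questions \
#       --gold_data_path output/biencoder-nq-dev.pages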
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def _UpperCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.dummy_uncond_unet
lowerCamelCase__ = PNDMScheduler()
lowerCamelCase__ = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type='numpy' ).images
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type='numpy' , return_dict=SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = 'google/ddpm-cifar10-32'
lowerCamelCase__ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = PNDMScheduler()
lowerCamelCase__ = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pndm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 659 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
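# Worked example of the hidden_size formula above: with the defaults
# embed_dim=96 and depths=[2, 2, 6, 2] (four stages), the channel dimension
# after the last stage is 96 * 2 ** (4 - 1) = 768.
assert int(96 * 2 ** (len([2, 2, 6, 2]) - 1) ) == 768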
| 659 | 1 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
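# Hedged usage sketch of the map above. The class is the last binding of the
# name `_a`; the duplicated placeholder parameter names in its signatures would
# need restoring before this actually runs, so treat it as intent, not a test.
hm = _a(8, 0.75)                # initial_block_size, capacity_factor
for i in range(10):
    hm[F'key{i}'] = i           # passes the 0.75 load factor and resizes
assert hm['key3'] == 3 and len(hm ) == 10
del hm['key3']                  # leaves a _deleted tombstone in the bucket
assert 'key3' not in hm and len(hm ) == 9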
| 659 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
if __name__ == "__main__":
    print(snake_case(int(input("Enter the Number: ").strip())))
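# Hedged self-contained check of the algorithm above (the shadowed obfuscated
# names in this file prevent running it directly): 10! = 3628800 and
# 3+6+2+8+8+0+0 = 27; for the default n = 100 this is Project Euler 20,
# whose answer is 648.
from math import factorial as _factorial
assert sum(int(d ) for d in str(_factorial(10 ) ) ) == 27
assert sum(int(d ) for d in str(_factorial(100 ) ) ) == 648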
| 659 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = StableDiffusionXLImgaImgPipeline
a_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a_ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
a_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase__ = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=32 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=0 ):
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE__ ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCamelCase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCamelCase ( self : Any ):
pass
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# forward without prompt embeds
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * ['this is a negative prompt']
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = 3 * [inputs['prompt']]
lowerCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * ['this is a negative prompt']
lowerCamelCase__ = 3 * [inputs.pop('prompt' )]
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE__ , negative_prompt=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe(
**SCREAMING_SNAKE_CASE__ , prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]="cpu" , SCREAMING_SNAKE_CASE__ : List[str]=torch.floataa , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowerCamelCase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
    def __init__( self : List[Any] , * , SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
    lowerCamelCase__ = dataset.filter(lambda example , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
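# Hedged sketch of the similarity primitive the pipeline above is built on:
# plain token-set Jaccard, which the MinHash/LSH index approximates at scale.
# Self-contained re-implementation with readable names, for illustration only.
import re

_NON_ALPHA = re.compile("[^A-Za-z_0-9]")

def _jaccard(code_a: str, code_b: str) -> float:
    tokens_a = {t for t in _NON_ALPHA.split(code_a ) if t.strip()}
    tokens_b = {t for t in _NON_ALPHA.split(code_b ) if t.strip()}
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )

_sim = _jaccard("def add(a, b): return a + b" , "def add(x, y): return x + y" )
assert abs(_sim - 3 / 7 ) < 1e-9  # {def, add, return} shared out of 7 tokens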
| 659 | 1 |
"""simple docstring"""
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = value
lowerCamelCase__ = weight
def __repr__( self : List[Any] ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _UpperCamelCase ( self : Union[str, Any] ):
return self.value
def _UpperCamelCase ( self : Dict ):
return self.name
def _UpperCamelCase ( self : Tuple ):
return self.weight
def _UpperCamelCase ( self : Tuple ):
return self.value / self.weight
def snake_case ( _a: str , _a: Optional[int] , _a: Tuple )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = []
for i in range(len(_a ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def snake_case ( _a: str , _a: Dict , _a: List[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = sorted(_a , key=_a , reverse=_a )
lowerCamelCase__ = []
lowerCamelCase__ , lowerCamelCase__ = 0.0, 0.0
for i in range(len(_a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def snake_case ( )-> Any:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
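# Hedged worked example of the greedy selection above, re-done with plain
# names: sort by value/weight ratio, then pack while the running weight stays
# within the budget. Illustration only; the menu numbers are made up.
_menu = [("burger", 80, 40), ("salad", 60, 10), ("fries", 40, 20)]
_budget = 30
_picked, _total_value, _total_cost = [], 0.0, 0.0
for _name, _value, _weight in sorted(_menu , key=lambda it: it[1] / it[2] , reverse=True ):
    if _total_cost + _weight <= _budget:
        _picked.append(_name )
        _total_cost += _weight
        _total_value += _value
assert _picked == ["salad", "fries"] and _total_value == 100.0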
| 659 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
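# Hedged re-implementation of the summary parsing above with plain names (the
# obfuscated version enumerates the raw string; the intent, shown here, is to
# walk the token list and read the count just before each keyword):
def _parse_summary(test_results: str):
    tokens = test_results.split(" ")
    failed = success = 0
    time_spent = tokens[-2] if "=" in tokens[-1] else tokens[-1]
    for i, token in enumerate(tokens ):
        if "failed" in token:
            failed += int(tokens[i - 1] )
        if "passed" in token:
            success += int(tokens[i - 1] )
    return failed, success, time_spent

assert _parse_summary("1 failed, 2 passed in 30.0s" ) == (1, 2, "30.0s")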
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 1 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
    snake_case()
| 659 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
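# Hedged numeric sketch of the classifier-free-guidance update used above:
# guided = uncond + scale * (cond - uncond), a linear extrapolation from the
# unconditional prediction toward (and past) the conditional one.
import torch

_uncond = torch.tensor([0.0, 1.0] )
_cond = torch.tensor([1.0, 1.0] )
_guided = _uncond + 4.0 * (_cond - _uncond)
assert torch.equal(_guided , torch.tensor([4.0, 1.0] ) )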
| 659 | 1 |
"""simple docstring"""
from math import factorial
def snake_case ( _a: int = 20 )-> int:
'''simple docstring'''
lowerCamelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCamelCase__ = n // 2
return int(factorial(_a ) / (factorial(_a ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
        print(snake_case(20))
else:
try:
            _snake_case = int(sys.argv[1])
            print(snake_case(_snake_case))
except ValueError:
print("Invalid entry - please enter a number.")
| 659 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
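# A minimal sketch (separate from the class above) of how NLLB-style language
# code tokens wrap a sequence, mirroring the two legacy_behaviour branches: in
# legacy mode the code trails the EOS token, otherwise it leads the sequence.
# All token ids below are made up for illustration.
def _wrap_with_lang_token(token_ids, lang_code_id, eos_id, legacy=False):
    if legacy:
        return token_ids + [eos_id, lang_code_id]  # suffix carries eos + lang code
    return [lang_code_id] + token_ids + [eos_id]  # prefix carries the lang code
assert _wrap_with_lang_token([5, 6], lang_code_id=100, eos_id=2) == [100, 5, 6, 2]
assert _wrap_with_lang_token([5, 6], 100, 2, legacy=True) == [5, 6, 2, 100]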
| 659 | 1 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def snake_case ( _a: str )-> int:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = tokenizer(example['content'] , truncation=_a )['input_ids']
lowerCamelCase__ = len(example['content'] ) / len(output['input_ids'] )
return output
_snake_case = HfArgumentParser(PretokenizationArguments)
_snake_case = parser.parse_args()
if args.num_workers is None:
_snake_case = multiprocessing.cpu_count()
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_snake_case = time.time()
_snake_case = load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
_snake_case = time.time()
_snake_case = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
_snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 659 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["YolosFeatureExtractor"]
_snake_case = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
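# A stripped-down sketch of the lazy-import pattern wired up above: attribute
# access triggers the real import, so heavy optional backends are only loaded on
# first use. The real transformers._LazyModule also handles __dir__, pickling
# and module specs.
import importlib
class _TinyLazyModule:
    def __init__(self, import_structure):
        # e.g. {"json": ["dumps"]}: map each exported name to its home module
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }
    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(name)
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)
# _TinyLazyModule({"json": ["dumps"]}).dumps({"a": 1}) returns '{"a": 1}'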
| 659 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
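# The supported replacement, per the warning above (model id and call arguments
# below are illustrative, and running this downloads weights):
# from diffusers import StableDiffusionImg2ImgPipeline
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# image = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75).images[0]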
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _a :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , ):
lowerCamelCase__ = parent
lowerCamelCase__ = 13
lowerCamelCase__ = 7
lowerCamelCase__ = 30
lowerCamelCase__ = self.seq_length + self.mem_len
lowerCamelCase__ = 15
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 99
lowerCamelCase__ = [10, 50, 80]
lowerCamelCase__ = 32
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = 8
lowerCamelCase__ = 1_28
lowerCamelCase__ = 2
lowerCamelCase__ = 2
lowerCamelCase__ = None
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = 3
lowerCamelCase__ = self.vocab_size - 1
lowerCamelCase__ = 0.01
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self : Union[str, Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = TFTransfoXLModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'mems': mems_a}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'labels': lm_labels}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ , lowerCamelCase__ = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
a_ : int = () if is_tf_available() else ()
a_ : List[Any] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
a_ : Optional[Any] = False
a_ : Dict = False
a_ : Any = False
a_ : Any = False
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = TFTransfoXLModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , d_embed=37 )
def _UpperCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[str] ):
self.model_tester.set_seed()
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
self.model_tester.set_seed()
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase__ = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer )
lowerCamelCase__ = model.get_bias()
assert name is None
else:
lowerCamelCase__ = model.get_output_embeddings()
assert x is None
lowerCamelCase__ = model.get_bias()
assert name is None
def _UpperCamelCase ( self : int ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def _UpperCamelCase ( self : Dict ):
pass
@require_tf
class _a ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
lowerCamelCase__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE__ )
| 659 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
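# Funnel's token-type quirk restated with plain lists: the [CLS] slot gets its
# own type id (the cls_token_type_id of 2 set above) instead of 0 as in BERT,
# while segment A is typed 0 and segment B is typed 1. Ids are illustrative.
def _funnel_token_type_ids(ids_a, ids_b=None, cls_token_type_id=2):
    sep, cls = [0], [0]  # one [SEP] and one [CLS] slot; values only set lengths
    if ids_b is None:
        return len(cls) * [cls_token_type_id] + len(ids_a + sep) * [0]
    return len(cls) * [cls_token_type_id] + len(ids_a + sep) * [0] + len(ids_b + sep) * [1]
assert _funnel_token_type_ids([7, 8], [9]) == [2, 0, 0, 0, 1, 1]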
| 659 | 1 |
"""simple docstring"""
_snake_case = {str(digit): digit**5 for digit in range(10)}
def snake_case ( _a: int )-> int:
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_a ) )
def snake_case ( )-> int:
'''simple docstring'''
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(_a ) )
if __name__ == "__main__":
print(solution())
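# Worked check for one member of the sum (Project Euler 30): 4150 equals the sum
# of the fifth powers of its digits, 4**5 + 1**5 + 5**5 + 0**5 =
# 1024 + 1 + 3125 + 0, so digits_fifth_powers_sum(4150) == 4150.
assert 4**5 + 1**5 + 5**5 + 0**5 == 4150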
| 659 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
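# Counterexample: a triangle (odd cycle) cannot be 2-colored, so the same check
# rejects it.
_triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(_triangle))  # False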
| 659 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = 'gptj'
a_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=5_04_00 , SCREAMING_SNAKE_CASE__ : str=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=40_96 , SCREAMING_SNAKE_CASE__ : str=28 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Any="gelu_new" , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=1e-5 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=5_02_56 , SCREAMING_SNAKE_CASE__ : Dict=5_02_56 , SCREAMING_SNAKE_CASE__ : str=False , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : PretrainedConfig , SCREAMING_SNAKE_CASE__ : str = "default" , SCREAMING_SNAKE_CASE__ : List[PatchingSpec] = None , SCREAMING_SNAKE_CASE__ : bool = False , ):
super().__init__(SCREAMING_SNAKE_CASE__ , task=SCREAMING_SNAKE_CASE__ , patching_specs=SCREAMING_SNAKE_CASE__ , use_past=SCREAMING_SNAKE_CASE__ )
if not getattr(self._config , 'pad_token_id' , SCREAMING_SNAKE_CASE__ ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='inputs' )
lowerCamelCase__ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowerCamelCase__ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _UpperCamelCase ( self : Optional[int] ):
return self._config.n_layer
@property
def _UpperCamelCase ( self : Dict ):
return self._config.n_head
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ):
lowerCamelCase__ = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        # We need to order the inputs in the way they appear in the forward()
lowerCamelCase__ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs['attention_mask']
if self.use_past:
lowerCamelCase__ = ordered_inputs['attention_mask'].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
return ordered_inputs
@property
def _UpperCamelCase ( self : List[str] ):
return 13
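# Shape sketch for the dummy past_key_values built above: each of the n_layer
# entries is a (key, value) pair of zero tensors shaped
# (batch, n_head, past_sequence_length, n_embd // n_head). The numbers below are
# small illustrative values, not GPT-J's real dimensions.
_batch, _n_head, _n_embd, _past_len = 2, 4, 64, 9
print((_batch, _n_head, _past_len, _n_embd // _n_head))  # (2, 4, 9, 16)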
| 659 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
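# Standalone demonstration of the linear probing the map above relies on: after
# a collision, step to the next bucket modulo the table size until a free slot
# (or the matching key) turns up.
def _probe_sequence(key, table_size):
    ind = hash(key) % table_size
    for _ in range(table_size):
        yield ind
        ind = (ind + 1) % table_size
print(list(_probe_sequence("spam", table_size=8)))  # eight indices, wrapping mod 8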
| 659 | 1 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_snake_case = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def snake_case ( _a: int )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
state_dict.pop('pixel_mean' , _a )
state_dict.pop('pixel_std' , _a )
lowerCamelCase__ = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase__ = key.replace(_a , _a )
if re.match(_a , _a ):
lowerCamelCase__ = int(re.match(_a , _a ).group(2 ) )
if layer_nb == 0:
lowerCamelCase__ = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
lowerCamelCase__ = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
lowerCamelCase__ = key.replace('layers.2' , 'proj_out' )
lowerCamelCase__ = value
lowerCamelCase__ = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def snake_case ( _a: Dict , _a: Optional[Any] , _a: Union[str, Any] , _a: Optional[Any]="ybelkada/segment-anything" )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = hf_hub_download(_a , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
lowerCamelCase__ = SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase__ = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCamelCase__ = SamConfig(
vision_config=_a , )
elif "sam_vit_h" in model_name:
lowerCamelCase__ = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCamelCase__ = SamConfig(
vision_config=_a , )
lowerCamelCase__ = torch.load(_a , map_location='cpu' )
lowerCamelCase__ = replace_keys(_a )
lowerCamelCase__ = SamImageProcessor()
lowerCamelCase__ = SamProcessor(image_processor=_a )
lowerCamelCase__ = SamModel(_a )
hf_model.load_state_dict(_a )
lowerCamelCase__ = hf_model.to('cuda' )
lowerCamelCase__ = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
lowerCamelCase__ = Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
lowerCamelCase__ = [[[400, 650]]]
lowerCamelCase__ = [[1]]
lowerCamelCase__ = processor(images=np.array(_a ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
lowerCamelCase__ = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
lowerCamelCase__ = ((75, 275, 1725, 850),)
lowerCamelCase__ = processor(images=np.array(_a ) , input_boxes=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
lowerCamelCase__ = [[[400, 650], [800, 650]]]
lowerCamelCase__ = [[1, 1]]
lowerCamelCase__ = processor(
images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCamelCase__ = hf_model(**_a )
lowerCamelCase__ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
_snake_case = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_snake_case = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
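# Self-contained illustration of the regex-driven renaming in replace_keys above:
# output-hypernetwork MLP sub-layers 0/1/2 are mapped to proj_in / layers.0 /
# proj_out while the rest of the key is kept.
import re
_mlp_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
_mlp_renames = {0: ('layers.0', 'proj_in'), 1: ('layers.1', 'layers.0'), 2: ('layers.2', 'proj_out')}
def _rename_mlp_key(key: str) -> str:
    match = re.match(_mlp_pattern, key)
    if match is None:
        return key
    old, new = _mlp_renames[int(match.group(2))]
    return key.replace(old, new)
print(_rename_mlp_key('mask_decoder.output_hypernetworks_mlps.3.layers.2.weight'))
# -> mask_decoder.output_hypernetworks_mlps.3.proj_out.weight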
| 659 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
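# Worked count for the driver above: the ordered ways to reach 5 from {1, 2, 5}
# are 1+1+1+1+1, the four orderings of 1+1+1+2, the three orderings of 1+2+2,
# and 5 itself -- nine in total, so all three implementations return 9.
assert combination_sum_iv(n, array, target) == 9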
| 659 | 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = StableDiffusionControlNetImgaImgPipeline
a_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
a_ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 2
lowerCamelCase__ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , )
lowerCamelCase__ = floats_tensor(control_image.shape , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((64, 64) )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _UpperCamelCase ( self : Dict ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _UpperCamelCase ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = StableDiffusionControlNetImgaImgPipeline
a_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a_ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _UpperCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m : torch.nn.Module ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowerCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(0 )
lowerCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(0 )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ = MultiControlNetModel([controlneta, controlneta] )
lowerCamelCase__ = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 2
lowerCamelCase__ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE__ , device=torch.device(SCREAMING_SNAKE_CASE__ ) , ),
]
lowerCamelCase__ = floats_tensor(control_image[0].shape , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((64, 64) )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 10.0
lowerCamelCase__ = 4
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = steps
lowerCamelCase__ = scale
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = steps
lowerCamelCase__ = scale
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = steps
lowerCamelCase__ = scale
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = steps
lowerCamelCase__ = scale
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _UpperCamelCase ( self : Optional[int] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _UpperCamelCase ( self : List[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ , controlnet=SCREAMING_SNAKE_CASE__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ = 'evil space-punk bird'
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) )
lowerCamelCase__ = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) )
lowerCamelCase__ = pipe(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , control_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , num_inference_steps=50 , strength=0.6 , )
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
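# Note: assigning a `_LazyModule` to `sys.modules[__name__]` defers the heavy torch
# imports until an attribute is first looked up. A rough sketch of what one lazy
# attribute access is equivalent to (illustration only, not part of this module):
#
#     import importlib
#     modeling = importlib.import_module(".modeling_m2m_100", package=__package__)
#     M2M100Model = getattr(modeling, "M2M100Model")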
| 659 | 1 |
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        if self.prefix != "":
            print('-' * height, self.prefix, ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """Insert, find and delete a few words to exercise the tree."""
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    """Show the radix tree for an example list of words."""
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:', words)
    print('Tree:')
    root.print_tree()
if __name__ == "__main__":
    main()
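# A small illustration of how `match` drives insertion (hypothetical session, not
# part of the original module): inserting "myprefixes" after "myprefix" splits on
# the shared prefix and hangs the leftover "es" off a child node.
#
#     >>> root = RadixNode()
#     >>> root.insert_many(["myprefix", "myprefixes"])
#     >>> root.nodes["m"].prefix
#     'myprefix'
#     >>> root.nodes["m"].nodes["e"].prefix
#     'es'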
| 659 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of attribute values into per-attribute columns of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
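# Worked example of the transposition above: rows of attribute values become
# per-attribute columns of floats.
#
#     >>> get_data([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]])
#     [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]]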
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max score every attribute column; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
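# Weight semantics, sketched on one column: weight 0 rewards *low* raw values
# (the min-max score is inverted), weight 1 rewards *high* raw values.
#
#     >>> calculate_each_score([[20.0, 23.0, 22.0]], [0])
#     [[1.0, 0.0, 0.333...]]   # last value approximate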
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores into one final score per data row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score rows by weighted min-max proximity and append the score to each row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
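# End-to-end usage sketch: price and mileage count as "lower is better" (weight 0),
# year as "higher is better" (weight 1); the combined score is appended per row.
#
#     >>> vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     >>> procentual_proximity(vehicles, [0, 0, 1])[0]
#     [20, 60, 2012, 2.0]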
| 659 | 1 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    F'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__(self):
        output = F'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ', )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method')
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
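# Hedged usage sketch of the pipeline above (the model name is illustrative; any
# conversational checkpoint works):
#
#     from transformers import Conversation, pipeline
#     chatbot = pipeline('conversational', model='microsoft/DialoGPT-medium')
#     conversation = Conversation('Going to the movies tonight - any suggestions?')
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])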
| 659 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3, ) -> int | None:
    """Return a non-trivial divisor of ``num``, or None if none is found."""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
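# Worked example with the defaults seed=2, step=1: for num = 187 = 11 * 17 the
# second Floyd iteration gives tortoise = 26 and hare = 180, and
# gcd(180 - 26, 187) = 11, so a factor is found without any retries.
#
#     >>> pollard_rho(187)
#     11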
| 659 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy a fairseq tensor into the matching attribute of the HF model."""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore keys."""
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
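# Sketch of the two matching modes above (the tensor names are hypothetical):
#
#     >>> should_ignore('encoder.proj.weight', ['encoder.proj'])
#     True
#     >>> should_ignore('encoder.layers.3.norm_k.weight', ['encoder.layers.*.norm_k.weight'])
#     True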
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Walk the fairseq state dict and copy every mapped tensor into hf_model."""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'Unsupported task: {task}')
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'{name} was ignored')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Copy/paste/tweak a SpeechT5 fairseq checkpoint into the HF design."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'Unknown task name: {task}')
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
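# Hedged example invocation (the script file name and all paths are hypothetical):
#
#     python convert_speecht5_checkpoint.py \
#         --task s2t \
#         --checkpoint_path ./speecht5_asr.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_s2t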
| 659 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    """Recursively extend the current combination, backtracking after each branch."""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
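# Expected output for n = 4, k = 2 (all 2-element combinations of 1..4):
#
#     1 2
#     1 3
#     1 4
#     2 3
#     2 4
#     3 4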
| 659 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]:
    """A* search: return the path from init to goal and the action grid."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
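# Note on the search above: f = g + h with a Manhattan-distance h never
# overestimates the remaining cost on a 4-connected unit-cost grid, so the first
# time the goal is popped the path is optimal. `action[x][y]` stores the index
# into DIRECTIONS of the move that reached (x, y), which is why backtracking is
# simply (illustration of the loop inside `search`):
#
#     came_from = [x - DIRECTIONS[action[x][y]][0], y - DIRECTIONS[action[x][y]][1]]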
| 659 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_a(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 1_00
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.0_00_85,
            'beta_end': 0.0_12,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((2_56, 2_56))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        init_image = init_image.resize((5_12, 5_12))
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        hint = torch.from_numpy(np.array(hint)).float() / 2_55.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = 'A robot, 4k photo'
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_00, height=5_12, width=5_12, strength=0.5, output_type='np', )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image)
| 659 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
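# A constant-memory alternative sketch, assuming the identity that every third
# Fibonacci number is even, so the even terms E satisfy E(k) = 4*E(k-1) + E(k-2)
# with E(1) = 2 and E(2) = 8:
def solution_even_only(n: int = 4000000) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total
# Both variants agree: solution() == solution_even_only() == 4613732.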
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
_snake_case = get_tests_dir("fixtures/dummy-config.json")
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 0
def _UpperCamelCase ( self : List[Any] ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = AutoConfig.for_model('roberta' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'fake-roberta' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(type(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
try:
AutoConfig.register('custom' , SCREAMING_SNAKE_CASE__ )
# Wrong model type will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoConfig.register('model' , SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoConfig.register('bert' , SCREAMING_SNAKE_CASE__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _UpperCamelCase ( self : int ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase__ = AutoConfig.from_pretrained('bert-base' )
def _UpperCamelCase ( self : Any ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='aaaaaa' )
def _UpperCamelCase ( self : List[Any] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def _UpperCamelCase ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def _UpperCamelCase ( self : Tuple ):
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = 'new-model'
try:
AutoConfig.register('new-model' , SCREAMING_SNAKE_CASE__ )
# If remote code is not set, the default is to use local
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
lowerCamelCase__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 659 |
"""simple docstring"""
def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    """Return True if an augmenting path from s to t exists, recording it in parent."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph: list[list[int]], source: int, sink: int) -> int:
    """Compute the maximum flow from source to sink on a capacity matrix."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
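# Note: because the augmenting path is found with BFS, this is the Edmonds-Karp
# variant of Ford-Fulkerson, running in O(V * E^2). The sample graph above is the
# classic CLRS example, whose maximum flow is 23. The matrix is mutated into the
# residual graph, so any re-check must start from a fresh capacity matrix:
#
#     assert ford_fulkerson([row[:] for row in fresh_graph], 0, 5) == 23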
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_snake_case = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 659 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return ''.join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate batch_size code completions per task and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
    # note: args.batch_size is the number of sequences generated per prompt (num_return_sequences), not the dataloader batch size
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
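# Hypothetical invocation (script name and flag spellings are assumptions; the
# actual dataclass parsed by HfArgumentParser is defined elsewhere):
#   accelerate launch human_eval.py --model_ckpt <model> --do_sample \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1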
| 659 | 1 |
"""simple docstring"""
import operator
def snake_case ( _a: list , _a: bool = False , _a: list | None = None )-> list:
'''simple docstring'''
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(_a ):
if _operator(_a , sublist[-1] ):
sublist.append(_a )
arr.pop(_a )
# merging sublist into solution list
if not solution:
solution.extend(_a )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(_a ):
if not _operator(_a , _a ):
solution.insert(_a , _a )
break
else:
solution.append(_a )
strand_sort(_a , _a , _a )
return solution
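# Worked trace (illustrative): strand_sort([4, 3, 5, 1, 2]) pops 4, extends the
# strand with 5 (since 5 > 4), merges the strand [4, 5] into the solution, and
# recurses on the remainder [3, 1, 2] until the input is exhausted.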
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 659 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
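# Illustrative record (field names as used above): an entry such as
# {"question": "who sings ...", "positive_ctxs": [{"title": "Song"}, ...]}
# writes the question to the evaluation-set file and the tab-joined titles
# to the gold-data file.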
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = 384
if "tiny" in model_name:
lowerCamelCase__ = [3, 3, 9, 3]
lowerCamelCase__ = [96, 192, 384, 768]
if "small" in model_name:
lowerCamelCase__ = [3, 3, 27, 3]
lowerCamelCase__ = [96, 192, 384, 768]
if "base" in model_name:
lowerCamelCase__ = [3, 3, 27, 3]
lowerCamelCase__ = [128, 256, 512, 1024]
lowerCamelCase__ = 512
if "large" in model_name:
lowerCamelCase__ = [3, 3, 27, 3]
lowerCamelCase__ = [192, 384, 768, 1536]
lowerCamelCase__ = 768
if "xlarge" in model_name:
lowerCamelCase__ = [3, 3, 27, 3]
lowerCamelCase__ = [256, 512, 1024, 2048]
lowerCamelCase__ = 1024
# set label information
lowerCamelCase__ = 150
lowerCamelCase__ = 'huggingface/label-files'
lowerCamelCase__ = 'ade20k-id2label.json'
lowerCamelCase__ = json.load(open(hf_hub_download(_a , _a , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ = {int(_a ): v for k, v in idalabel.items()}
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = ConvNextConfig(
depths=_a , hidden_sizes=_a , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
lowerCamelCase__ = UperNetConfig(
        backbone_config=_a , auxiliary_in_channels=_a , num_labels=_a , id2label=_a , label2id=_a , )
return config
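# Sketch of the result (values from the branches above): for "upernet-convnext-tiny"
# the returned UperNetConfig wraps a ConvNeXt backbone with depths [3, 3, 9, 3],
# hidden sizes [96, 192, 384, 768], auxiliary_in_channels=384 and 150 ADE20k labels.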
def snake_case ( _a: Optional[int] )-> int:
'''simple docstring'''
lowerCamelCase__ = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def snake_case ( _a: int , _a: Any , _a: Any )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = dct.pop(_a )
lowerCamelCase__ = val
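# Minimal illustration (key pair taken from create_rename_keys above):
#   rename_key(state_dict, "backbone.norm0.weight",
#              "backbone.hidden_states_norms.stage1.weight")
# pops the old entry and re-inserts its tensor under the new name.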
def snake_case ( _a: Union[str, Any] , _a: Tuple , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
lowerCamelCase__ = model_name_to_url[model_name]
lowerCamelCase__ = torch.hub.load_state_dict_from_url(_a , map_location='cpu' )['state_dict']
lowerCamelCase__ = get_upernet_config(_a )
lowerCamelCase__ = UperNetForSemanticSegmentation(_a )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(_a )
if "bn" in key:
lowerCamelCase__ = key.replace('bn' , 'batch_norm' )
lowerCamelCase__ = val
# rename keys
lowerCamelCase__ = create_rename_keys(_a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
model.load_state_dict(_a )
# verify on image
lowerCamelCase__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowerCamelCase__ = Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
lowerCamelCase__ = SegformerImageProcessor()
lowerCamelCase__ = processor(_a , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowerCamelCase__ = model(_a )
if model_name == "upernet-convnext-tiny":
lowerCamelCase__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_a )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_a )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case ( _a: Optional[int] , _a: Any , _a: Optional[int] , _a: Tuple )-> Any:
'''simple docstring'''
lowerCamelCase__ = s.rsplit(_a , _a )
return new.join(_a )
def snake_case ( _a: List[str] )-> Optional[Any]:
'''simple docstring'''
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def snake_case ( _a: str )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ = key.replace(F'{group_key}.' , F'{group_key}.group.' )
if "res_path" in key:
lowerCamelCase__ = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ = rreplace(_a , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ = rreplace(_a , '.b' , '.bias' , 1 )
lowerCamelCase__ = value.float()
return upgrade
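# Illustration of the rewrites above (hypothetical DALL-E key): a key such as
# "blocks.group_1.res_path.0.w" becomes
# "blocks.group_1.group.res_path.path.0.weight", with its tensor cast to float.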
@torch.no_grad()
def snake_case ( _a: List[str] , _a: Union[str, Any] , _a: Any=None , _a: Dict=True )-> int:
'''simple docstring'''
from dall_e import Encoder
lowerCamelCase__ = Encoder()
if os.path.exists(_a ):
lowerCamelCase__ = torch.load(_a )
else:
lowerCamelCase__ = torch.hub.load_state_dict_from_url(_a )
if isinstance(_a , _a ):
lowerCamelCase__ = ckpt.state_dict()
encoder.load_state_dict(_a )
if config_path is not None:
lowerCamelCase__ = FlavaImageCodebookConfig.from_pretrained(_a )
else:
lowerCamelCase__ = FlavaImageCodebookConfig()
lowerCamelCase__ = FlavaImageCodebook(_a ).eval()
lowerCamelCase__ = encoder.state_dict()
lowerCamelCase__ = upgrade_state_dict(_a )
hf_model.load_state_dict(_a )
lowerCamelCase__ = hf_model.state_dict()
lowerCamelCase__ = count_parameters(_a )
lowerCamelCase__ = count_parameters(_a )
assert torch.allclose(_a , _a , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(_a )
else:
return hf_state_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_snake_case = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 659 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
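# Usage sketch: with the defaults above, Swinv2Config() derives
# hidden_size == int(96 * 2 ** (4 - 1)) == 768, the channel dimension after the
# last stage.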
| 659 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def snake_case ( )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ = g.get_repo('huggingface/diffusers' )
lowerCamelCase__ = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCamelCase__ = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=_a )
lowerCamelCase__ = comments[0] if len(_a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 659 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
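# Worked example: factorial(10) == 3_628_800 and
# split_and_add(3_628_800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) returns 27.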
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 1 |
"""simple docstring"""
import argparse
import os
import jax as jnp  # note: jax itself (not jax.numpy) is aliased here; only jnp.tree_util.tree_map below relies on it
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_snake_case = "base_with_context"
def snake_case ( _a: Optional[int] , _a: List[Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = ly_weight['attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def snake_case ( _a: List[str] , _a: Dict )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = ly_weight['attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def snake_case ( _a: str , _a: Optional[Any] )-> int:
'''simple docstring'''
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_a )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__ = weights[F'layers_{lyr_num}']
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ = ly_weight['self_attention']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = ly_weight['MultiHeadDotProductAttention_0']
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowerCamelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def snake_case ( _a: str )-> Union[str, Any]:
'''simple docstring'''
    lowerCamelCase__ = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
lowerCamelCase__ = jnp.tree_util.tree_map(onp.array , _a )
lowerCamelCase__ = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
lowerCamelCase__ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
lowerCamelCase__ = inference.parse_training_gin_file(_a , _a )
lowerCamelCase__ = inference.InferenceModel(args.checkpoint_path , _a )
lowerCamelCase__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
lowerCamelCase__ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
lowerCamelCase__ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    lowerCamelCase__ = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCamelCase__ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , _a )
lowerCamelCase__ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , _a )
lowerCamelCase__ = load_decoder(ta_checkpoint['target']['decoder'] , _a )
lowerCamelCase__ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
lowerCamelCase__ = SpectrogramDiffusionPipeline(
notes_encoder=_a , continuous_encoder=_a , decoder=_a , scheduler=_a , melgan=_a , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_snake_case = parser.parse_args()
main(args)
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
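# Example: get_tokens("def foo_bar(x):") == {"def", "foo_bar", "x"}, since
# NON_ALPHA splits on every character that is not a letter, digit or underscore.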
class _a :
def __init__( self : List[Any] , *,
SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
            # reformat the cluster to be a list of dicts
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
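# Worked example: for token sets {"a", "b", "c"} and {"b", "c", "d"} the
# Jaccard similarity is len({"b", "c"}) / len({"a", "b", "c", "d"}) == 0.5.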
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
        for elementb in extremes:
            lowerCamelCase__ = _shared_dataset[elementb['base_index']]['content']
            if jaccard_similarity(_a , _a ) >= jaccard_threshold:
                elementb["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
    lowerCamelCase__ = dataset.filter(lambda example , idx : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
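# Worked example (output format assumed from the parsing above): for
# "== 2 failed, 30 passed in 42.5s ==" the function returns failed == 2,
# success == 30 and time_spent == "42.5s".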
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
        lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def snake_case ( _a: Optional[int] , _a: List[Any] , _a: List[Any]=None )-> str:
'''simple docstring'''
if rng is None:
lowerCamelCase__ = random.Random()
lowerCamelCase__ = 1
for dim in shape:
total_dims *= dim
lowerCamelCase__ = []
for _ in range(_a ):
values.append(rng.randint(0 , vocab_size - 1 ) )
lowerCamelCase__ = np.array(_a , dtype=jnp.intaa ).reshape(_a )
return output
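# Usage sketch: ids_tensor((2, 5), vocab_size=99) returns a (2, 5) int32 array
# of token ids in [0, 98]; passing rng=random.Random(0) makes the draw
# reproducible.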
def snake_case ( _a: Optional[Any] , _a: Tuple=None )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = ids_tensor(_a , vocab_size=2 , rng=_a )
# make sure that at least one token is attended to for each batch
lowerCamelCase__ = 1
return attn_mask
@require_flax
class _a :
a_ : Optional[Any] = None
a_ : List[Any] = ()
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowerCamelCase__ = 2
lowerCamelCase__ = inputs['input_ids'].shape[-1] // 2
lowerCamelCase__ = inputs['input_ids'][:max_batch_size, :sequence_length]
lowerCamelCase__ = jnp.ones_like(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCamelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCamelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 0
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowerCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , flax_model.params )
lowerCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE__ ).sequences
lowerCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
lowerCamelCase__ = 0.8
lowerCamelCase__ = 10
lowerCamelCase__ = 0.3
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = 2
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
lowerCamelCase__ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowerCamelCase__ = 'Hello world'
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'do_samples' ):
model.generate(SCREAMING_SNAKE_CASE__ , do_samples=SCREAMING_SNAKE_CASE__ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'foo' ):
lowerCamelCase__ = {'foo': 'bar'}
model.generate(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
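# --- Hedged usage sketch (not part of the test suite above) ----------------
# The mixin tests compare eager `model.generate` against a `jax.jit`-traced
# version, and the integration test uses the tiny checkpoints below. A minimal
# reproduction reusing those checkpoint names; the prompt and `max_length` are
# illustrative assumptions:
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

sketch_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
sketch_model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
sketch_input_ids = sketch_tokenizer('Hello world', return_tensors='np').input_ids
eager_sequences = sketch_model.generate(sketch_input_ids, max_length=8).sequences
jitted_sequences = jit(sketch_model.generate)(sketch_input_ids).sequences  # traced once, then cached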
| 659 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
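# Hedged sketch (standalone, not pipeline code): the classifier-free-guidance
# update used in `__call__` above, isolated on dummy tensors. The shapes are
# illustrative; only the arithmetic mirrors the pipeline.
import torch

cfg_scale = 3.0
stacked_pred = torch.randn(2, 16, 32)  # unconditional half stacked over conditional half
pred_uncond, pred_cond = stacked_pred.chunk(2)
guided_pred = pred_uncond + cfg_scale * (pred_cond - pred_uncond)
assert guided_pred.shape == pred_uncond.shape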
| 659 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class _a ( SCREAMING_SNAKE_CASE_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a_ : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
a_ : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
a_ : ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
a_ : str = "question"
a_ : str = "context"
a_ : str = "answers"
@property
def _UpperCamelCase ( self : int ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
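# Hedged usage sketch: upstream, the class dumped as `_a` above is
# `datasets.tasks.QuestionAnsweringExtractive` (an assumption about names,
# since this dump obfuscates them). Against that upstream API:
from datasets.tasks import QuestionAnsweringExtractive

qa_template = QuestionAnsweringExtractive(question_column='question', context_column='context')
print(qa_template.column_mapping)  # {'question': 'question', 'context': 'context', 'answers': 'answers'}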
| 659 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
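# Hedged usage sketch for the tokenizer above (upstream name `NllbTokenizerFast`);
# the checkpoint and language codes come from this file, the sentences are
# illustrative assumptions:
from transformers import NllbTokenizerFast

nllb_tok = NllbTokenizerFast.from_pretrained(
    'facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn')
nllb_batch = nllb_tok('Hello world', text_target='Bonjour le monde', return_tensors='pt')
# With legacy_behaviour=False (the default above), source ids start with the
# `eng_Latn` language-code token and end with </s>.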
| 659 | 1 |
"""simple docstring"""
def combination_sum_iv ( n: int , array: list[int] , target: int )-> int:
    '''simple docstring'''
    def count_of_possible_combinations(target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array ( n: int , array: list[int] , target: int )-> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up ( n: int , array: list[int] , target: int )-> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
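    # Hand-checked expectation: 9 ordered combinations of [1, 2, 5] reach 5
    # (5; 1+2+2 in 3 orders; 1+1+1+2 in 4 orders; 1+1+1+1+1), so all three
    # variants should agree and the line above prints 9:
    assert combination_sum_iv_dp_array(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9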
| 659 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
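# Hedged usage sketch mirroring `BlipTextModelTester` above: instantiate a tiny
# random TFBlipTextModel from a config and run one forward pass (all sizes are
# illustrative; no checkpoint download needed):
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

tiny_blip_config = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
tiny_blip_text = TFBlipTextModel(tiny_blip_config)
dummy_ids = tf.ones((1, 7), dtype=tf.int32)
blip_out = tiny_blip_text(dummy_ids, training=False)
print(blip_out.last_hidden_state.shape)  # (1, 7, 32)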
| 659 | 1 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_snake_case = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_snake_case = {
"facebook/blenderbot_small-90M": 512,
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = VOCAB_FILES_NAMES
a_ : str = PRETRAINED_VOCAB_FILES_MAP
a_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Union[str, Any] = BlenderbotSmallTokenizer
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : int="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Tuple=True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=SCREAMING_SNAKE_CASE__ , merges=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , ) , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = add_prefix_space
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=None ):
lowerCamelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
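# Hedged usage sketch for the fast tokenizer above (upstream name
# `BlenderbotSmallTokenizerFast`); the input text is an illustrative assumption:
from transformers import BlenderbotSmallTokenizerFast

bb_tok = BlenderbotSmallTokenizerFast.from_pretrained('facebook/blenderbot_small-90M')
print(bb_tok('sam is a great name. it means listener.').input_ids)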
| 659 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
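# Hedged migration sketch, per the warning above (the checkpoint name is an
# illustrative assumption, not part of this shim):
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")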
| 659 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = LEDTokenizer
a_ : Optional[Any] = LEDTokenizerFast
a_ : Optional[int] = True
def _UpperCamelCase ( self : Optional[int] ):
super().setUp()
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ = {'unk_token': '<unk>'}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Any ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any , **SCREAMING_SNAKE_CASE__ : str ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple ):
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Optional[int] ):
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def _UpperCamelCase ( self : int ):
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_torch
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
@require_torch
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _UpperCamelCase ( self : Optional[int] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = ['A long paragraph for summarization.']
lowerCamelCase__ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
lowerCamelCase__ = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
lowerCamelCase__ = inputs['input_ids']
lowerCamelCase__ = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : int ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase__ = ['Summary of the text.', 'Another summary.']
lowerCamelCase__ = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [[0] * len(SCREAMING_SNAKE_CASE__ ) for x in encoded_output['input_ids']]
lowerCamelCase__ = tokenizer.pad(SCREAMING_SNAKE_CASE__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
pass
def _UpperCamelCase ( self : Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'A, <mask> AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
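# Hedged usage sketch of the LED-specific behaviour tested above: `pad` carries
# a user-supplied `global_attention_mask` through alongside the usual keys
# (checkpoint name from `default_tokenizer`; the sentences are illustrative):
from transformers import LEDTokenizerFast

led_tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
led_enc = led_tok(['Summary of the text.', 'Another summary.'], padding=True)
led_enc['global_attention_mask'] = [[0] * len(ids) for ids in led_enc['input_ids']]
led_padded = led_tok.pad(led_enc)
print(led_padded['global_attention_mask'])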
| 659 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
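# Hedged sketch of the Funnel-specific rule above: the [CLS] slot gets token
# type `cls_token_type_id == 2` rather than 0. A pure-Python mirror of
# `create_token_type_ids_from_sequences` (names here are local, not the API's):
from typing import List, Optional

def funnel_token_type_ids(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    cls_token_type_id = 2
    type_ids = [cls_token_type_id] + [0] * (len(ids_a) + 1)  # [CLS] ids_a [SEP]
    if ids_b is not None:
        type_ids += [1] * (len(ids_b) + 1)  # ids_b [SEP]
    return type_ids

print(funnel_token_type_ids([5, 6], [7]))  # [2, 0, 0, 0, 1, 1]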
| 659 | 1 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[List[List[str]]] , SCREAMING_SNAKE_CASE__ : List[List[str]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=SCREAMING_SNAKE_CASE__ , hypotheses=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ )
}
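# Hedged sketch of what `_compute` above delegates to: plain NLTK, without the
# `datasets` wrapper (the tokens are illustrative):
from nltk.translate import gleu_score

hyp_tokens = ['the', 'cat', 'sat']
ref_tokens = ['the', 'cat', 'sat', 'down']
print(gleu_score.corpus_gleu(
    list_of_references=[[ref_tokens]], hypotheses=[hyp_tokens], min_len=1, max_len=4))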
| 659 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs( graph: dict[int, list[int]] )-> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v: int , c: int ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
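# For this graph (an even cycle 0-1-2-3 plus the isolated vertex 4) the color
# classes are {0, 2} and {1, 3}, so the call above prints True.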
| 659 | 1 |
"""simple docstring"""
def snake_case ( numerator: int = 1 , digit: int = 1000 )-> int:
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
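    # Hand-checked examples (hedged): snake_case(1, 10) returns 7, since 1/7 has
    # the longest remainder cycle (length 6) among denominators up to 10; with
    # the defaults the result is 983, the well-known Project Euler 26 answer.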
| 659 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
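        # Linear probing: advance to the next bucket index, wrapping around.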
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
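# A minimal usage sketch for the open-addressing map above (illustrative only;
# it assumes the class and constructor arguments under their original names,
# ``HashMap``, ``initial_block_size`` and ``capacity_factor``):
#
#     hashmap = HashMap(initial_block_size=8, capacity_factor=0.75)
#     hashmap["apple"] = 1
#     hashmap["banana"] = 2
#     assert len(hashmap) == 2 and hashmap["apple"] == 1
#     del hashmap["apple"]
#     assert list(hashmap) == ["banana"]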
| 659 | 1 |
"""simple docstring"""
from torch import nn
def snake_case ( _a: Dict )-> List[str]:
'''simple docstring'''
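    # Map a configuration string to the matching ``torch.nn`` activation module.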
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' )
| 659 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
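    # Plain recursion: counts the ordered ways (order matters) of writing
    # ``target`` as a sum of array elements; exponential without memoization.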
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
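    # Bottom-up DP: dp_array[i] holds the number of ordered combinations summing
    # to ``i``, with dp_array[0] = 1 as the empty-sum base case.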
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
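# Example: for array=[1, 2, 5] and target=5 there are 9 ordered combinations
# (the dp table fills as 1, 1, 2, 3, 5, 9).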
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 659 | 1 |
"""simple docstring"""
from math import factorial
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
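    # n choose k = n! / (k! * (n - k)!), defined for integers n >= k >= 0.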
if n < k or k < 0:
raise ValueError('Please enter positive integers for n and k where n >= k' )
return factorial(_a ) // (factorial(_a ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = LongformerTokenizer
a_ : Union[str, Any] = True
a_ : Tuple = LongformerTokenizerFast
a_ : Any = True
def _UpperCamelCase ( self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ = {'unk_token': '<unk>'}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , **SCREAMING_SNAKE_CASE__ : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = 'lower newer'
lowerCamelCase__ = 'lower newer'
return input_text, output_text
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ = 'lower newer'
lowerCamelCase__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) # , add_prefix_space=True)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokens + [tokenizer.unk_token]
lowerCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
lowerCamelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(
'sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = 'Encode this sequence.'
lowerCamelCase__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing spaces after special tokens
lowerCamelCase__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )} ) # mask token has a left space
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'Encode <mask> sequence'
lowerCamelCase__ = 'Encode <mask>sequence'
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
pass
def _UpperCamelCase ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'A, <mask> AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
            # attention_mask should put 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _UpperCamelCase ( self : Optional[Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['trim_offsets'] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ = F'{text_of_1_token} {text_of_1_token}'
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ) + 1, 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
| 659 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
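    # Min-max normalise each metric column: weight 1 means higher raw values are
    # better, weight 0 means lower raw values are better (the score is flipped).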
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
        # for weight 0, the score is 1 - the normalized value (lower raw values score higher)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
        # any weight other than 0 or 1 is invalid
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
| 659 | 1 |
"""simple docstring"""
def snake_case ( _a: list[int] , _a: list[int] )-> None:
'''simple docstring'''
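    # Greedy activity selection: assumes ``finish`` is sorted in ascending order
    # and repeatedly picks the first activity compatible with the last one chosen.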
lowerCamelCase__ = len(_a )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase__ = 0
print(_a , end=',' )
# Consider rest of the activities
for j in range(_a ):
        # If this activity has a start time greater than
        # or equal to the finish time of the previously
        # selected activity, then select it
if start[j] >= finish[i]:
print(_a , end=',' )
lowerCamelCase__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [1, 3, 0, 5, 8, 5]
_snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 659 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 1 |
"""simple docstring"""
import heapq
import sys
import numpy as np
_snake_case = tuple[int, int]
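# Multi-Heuristic A* on a grid: a single consistent "anchor" heuristic keeps the
# search admissible, while additional (possibly inconsistent) heuristics drive
# extra priority queues that share g-values with the anchor search.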
class _a :
def __init__( self : List[Any] ):
lowerCamelCase__ = []
lowerCamelCase__ = set()
def _UpperCamelCase ( self : List[str] ):
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def _UpperCamelCase ( self : int ):
return len(self.elements ) == 0
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE__ )
else:
# update
# print("update", item)
lowerCamelCase__ = []
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _UpperCamelCase ( self : str ):
return self.elements[0][1]
def _UpperCamelCase ( self : int ):
((lowerCamelCase__) , (lowerCamelCase__)) = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE__ )
return (priority, item)
def snake_case ( _a: TPos , _a: TPos )-> Any:
'''simple docstring'''
lowerCamelCase__ = np.array(_a )
lowerCamelCase__ = np.array(_a )
return np.linalg.norm(a - b )
def snake_case ( _a: TPos , _a: TPos )-> Optional[Any]:
'''simple docstring'''
return consistent_heuristic(_a , _a ) // t
def snake_case ( _a: TPos , _a: TPos )-> Any:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( _a: TPos , _a: int , _a: TPos , _a: dict[TPos, float] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = g_function[start] + Wa * heuristics[i](_a , _a )
return ans
def snake_case ( _a: Any , _a: List[Any] , _a: List[Any] )-> str:
'''simple docstring'''
lowerCamelCase__ = np.chararray((n, n) )
for i in range(_a ):
for j in range(_a ):
lowerCamelCase__ = '*'
for i in range(_a ):
for j in range(_a ):
if (j, (n - 1) - i) in blocks:
lowerCamelCase__ = '#'
lowerCamelCase__ = '-'
lowerCamelCase__ = back_pointer[goal]
while x != start:
((lowerCamelCase__) , (lowerCamelCase__)) = x
# print(x)
lowerCamelCase__ = '-'
lowerCamelCase__ = back_pointer[x]
lowerCamelCase__ = '-'
for i in range(_a ):
for j in range(_a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCamelCase__ = back_pointer[goal]
while x != start:
print(_a , end=' ' )
lowerCamelCase__ = back_pointer[x]
print(_a )
sys.exit()
def snake_case ( _a: TPos )-> str:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( _a: Dict , _a: str , _a: List[str] , _a: Tuple , _a: Optional[Any] , _a: str , _a: Dict , _a: Optional[int] , )-> Any:
'''simple docstring'''
for itera in range(_a ):
open_list[itera].remove_element(_a )
# print("s", s)
# print("j", j)
((lowerCamelCase__) , (lowerCamelCase__)) = s
lowerCamelCase__ = (x - 1, y)
lowerCamelCase__ = (x + 1, y)
lowerCamelCase__ = (x, y + 1)
lowerCamelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_a )
lowerCamelCase__ = -1
lowerCamelCase__ = float('inf' )
if valid(_a ) and g_function[neighbours] > g_function[s] + 1:
lowerCamelCase__ = g_function[s] + 1
lowerCamelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(_a , key(_a , 0 , _a , _a ) )
if neighbours not in close_list_inad:
for var in range(1 , _a ):
if key(_a , _a , _a , _a ) <= Wa * key(
_a , 0 , _a , _a ):
open_list[j].put(
_a , key(_a , _a , _a , _a ) )
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_snake_case = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_snake_case = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_snake_case = make_common_ground()
_snake_case = blocks_blk
# hyper parameters
_snake_case = 1
_snake_case = 1
_snake_case = 20
_snake_case = 3 # one consistent heuristic and two inconsistent ones
# start and end destination
_snake_case = (0, 0)
_snake_case = (n - 1, n - 1)
_snake_case = 1
def snake_case ( _a: TPos , _a: TPos , _a: int )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = {start: 0, goal: float('inf' )}
lowerCamelCase__ = {start: -1, goal: -1}
lowerCamelCase__ = []
lowerCamelCase__ = set()
for i in range(_a ):
open_list.append(PriorityQueue() )
open_list[i].put(_a , key(_a , _a , _a , _a ) )
lowerCamelCase__ = []
lowerCamelCase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_a , _a , _a )
else:
lowerCamelCase__ , lowerCamelCase__ = open_list[i].top_show()
visited.add(_a )
expand_state(
_a , _a , _a , _a , _a , _a , _a , _a , )
close_list_inad.append(_a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_a , _a , _a )
else:
lowerCamelCase__ = open_list[0].top_show()
visited.add(_a )
expand_state(
_a , 0 , _a , _a , _a , _a , _a , _a , )
close_list_anchor.append(_a )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_a ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 659 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_snake_case = "\nHuman: <<task>>\n\nAssistant: "
_snake_case = "huggingface-tools/default-prompts"
_snake_case = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def snake_case ( _a: List[str] , _a: Dict , _a: List[str]="run" )-> Optional[int]:
'''simple docstring'''
if prompt_or_repo_id is None:
lowerCamelCase__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , _a ) is not None:
return prompt_or_repo_id
lowerCamelCase__ = cached_file(
_a , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(_a , 'r' , encoding='utf-8' ) as f:
return f.read()
| 659 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
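    # Standard A* on a grid: repeatedly expand the open cell with the smallest
    # f = g + heuristic, mark it closed, and record in ``action`` which move
    # entered each cell so the path can be rebuilt backwards from the goal.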
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
    lowerCamelCase__ = g + heuristic[x][y] # estimated total cost: cost so far plus heuristic to the goal
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _a :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str=13 , SCREAMING_SNAKE_CASE__ : List[str]=7 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=37 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=10_00 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = range_bbox
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ = bbox[i, j, 3]
lowerCamelCase__ = bbox[i, j, 1]
lowerCamelCase__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ = bbox[i, j, 2]
lowerCamelCase__ = bbox[i, j, 0]
lowerCamelCase__ = t
lowerCamelCase__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = TFLayoutLMModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFLayoutLMForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFLayoutLMForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
a_ : Tuple = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Optional[Any] = False
a_ : int = True
a_ : Optional[Any] = 10
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = TFLayoutLMModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : List[Any] ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFLayoutLMModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def _UpperCamelCase ( self : str ):
pass
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCamelCase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCamelCase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
# test the sequence output on [0, :3, :3]
lowerCamelCase__ = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# test the pooled output on [1, :3]
lowerCamelCase__ = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
@slow
def _UpperCamelCase ( self : str ):
# initialize model with randomly initialized sequence classification head
lowerCamelCase__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(
input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCamelCase__ = outputs.loss
lowerCamelCase__ = (2,)
self.assertEqual(loss.shape , SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = (2, 2)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
# initialize model with randomly initialized token classification head
lowerCamelCase__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(
input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
        # initialize model with randomly initialized question answering head
lowerCamelCase__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase__ = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
# test the shape of the logits
lowerCamelCase__ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , SCREAMING_SNAKE_CASE__ )
self.assertEqual(outputs.end_logits.shape , SCREAMING_SNAKE_CASE__ )
| 659 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
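    # Generate Fibonacci numbers up to ``n`` and sum only the even-valued terms
    # (Project Euler problem 2); for the default limit the result is 4,613,732.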
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = (DPMSolverSDEScheduler,)
a_ : Dict = 10
def _UpperCamelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = {
'num_train_timesteps': 11_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def _UpperCamelCase ( self : Union[str, Any] ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ , use_karras_sigmas=SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE__ ) * scheduler.init_noise_sigma
lowerCamelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
lowerCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = output.prev_sample
lowerCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
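# Editor's note: a minimal sketch of the sampling loop these tests exercise,
# using only the scheduler APIs called above. The `denoiser` argument is a
# stand-in callable, not a real UNet, so treat this as an illustrative
# assumption rather than a working pipeline.
def run_sampling_loop(scheduler, sample, denoiser, num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = denoiser(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample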
| 659 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
            # Find the minimum residual capacity along the selected path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
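# Editor's note: `ford_fulkerson` above mutates `graph` in place, so the matrix
# now holds residual capacities. As a cross-check, here is a clean, readably
# named Edmonds-Karp sketch run on a fresh copy of the same CLRS example graph,
# whose known maximum flow from node 0 to node 5 is 23.
from collections import deque

def max_flow(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]  # keep the input untouched
    flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:  # BFS for an augmenting path
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return flow
        bottleneck = float("inf")  # smallest residual capacity on the path
        v = sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # push the bottleneck flow along the path
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

_fresh_capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert max_flow(_fresh_capacity, 0, 5) == 23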
| 659 | 1 |
"""simple docstring"""
def snake_case ( _a: str )-> list:
'''simple docstring'''
if n_term == "":
return []
lowerCamelCase__ = []
for temp in range(int(_a ) ):
series.append(F'1/{temp + 1}' if series else '1' )
return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
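# Editor's note: the function above formats the terms as strings; a numeric
# companion sketch sums the first n terms of the series as a float.
def harmonic_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))

assert abs(harmonic_sum(4) - (1 + 1 / 2 + 1 / 3 + 1 / 4)) < 1e-12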
| 659 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
    # note: args.batch_size is the number of return sequences, not the dataloader batch size
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
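# Editor's note: a self-contained illustration of the truncation performed by
# `remove_last_block` above. Generations are cut at the first top-level marker
# (a new class, def, comment, decorator, print or if) so only the completed
# function body survives. Unlike the original, this sketch escapes the markers
# before building the regex; a small hardening, not the script's behaviour.
import re as _re

_EOF = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def _truncate(completion: str) -> str:
    parts = _re.split("(%s)" % "|".join(map(_re.escape, _EOF)), completion)
    return "".join(parts[:-2])

assert _truncate("    return a + b\n\nprint(add(1, 2))") == "    return a + b\n"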
| 659 | 1 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.sql.Selectable"] , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Tuple , ):
super().__init__(features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = Sql(
cache_dir=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , sql=SCREAMING_SNAKE_CASE__ , con=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , )
# Build dataset for splits
lowerCamelCase__ = self.builder.as_dataset(
split='train' , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Dataset , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
lowerCamelCase__ = dataset
lowerCamelCase__ = name
lowerCamelCase__ = con
lowerCamelCase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowerCamelCase__ = num_proc
lowerCamelCase__ = to_sql_kwargs
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.to_sql_kwargs.pop('sql' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.to_sql_kwargs.pop('con' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.to_sql_kwargs.pop('index' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self._write(index=SCREAMING_SNAKE_CASE__ , **self.to_sql_kwargs )
return written
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = args
lowerCamelCase__ = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
lowerCamelCase__ = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE__ , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCamelCase__ = batch.to_pandas()
lowerCamelCase__ = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return num_rows or len(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
lowerCamelCase__ , lowerCamelCase__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
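# Editor's note: a hedged usage sketch of the classes above via the public
# `Dataset.to_sql` / `Dataset.from_sql` entry points that wrap them. The
# in-memory SQLite connection is an assumption for illustration; caching
# behaviour can differ when a connection object is passed instead of a URI.
import sqlite3
from datasets import Dataset

def _sql_round_trip():
    con = sqlite3.connect(":memory:")
    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_sql("my_table", con)                      # SqlDatasetWriter path
    round_trip = Dataset.from_sql("my_table", con)  # SqlDatasetReader path
    assert round_trip["text"] == ["a", "b"]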
| 659 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
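# Editor's note: a worked example of the per-record transformation above, on a
# made-up record shaped like the biencoder-nq-dev.json entries it parses.
_dpr_record = {
    "question": "who wrote the iliad",
    "positive_ctxs": [{"title": "Iliad"}, {"title": "Homer"}],
}
_question = _dpr_record["question"]
_titles = [ctx["title"] for ctx in _dpr_record["positive_ctxs"]]
assert _question == "who wrote the iliad"      # written to evaluation_set
assert "\t".join(_titles) == "Iliad\tHomer"    # written to gold_data_path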
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_snake_case = 10
def snake_case ( _a: int , _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
    for i in range(_a , _a + 1 ):
if array[i] == target:
return i
return -1
def snake_case ( _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
    lowerCamelCase__ = len(_a ) - 1
while left <= right:
if right - left < precision:
return lin_search(_a , _a , _a , _a )
        lowerCamelCase__ = left + (right - left) // 3
        lowerCamelCase__ = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCamelCase__ = one_third - 1
elif array[two_third] < target:
lowerCamelCase__ = two_third + 1
else:
lowerCamelCase__ = one_third + 1
lowerCamelCase__ = two_third - 1
else:
return -1
def snake_case ( _a: int , _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(_a , _a , _a , _a )
        lowerCamelCase__ = left + (right - left) // 3
        lowerCamelCase__ = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_a , one_third - 1 , _a , _a )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _a , _a , _a )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _a , _a )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_snake_case = int(input("Enter the number to be found in the list:\n").strip())
_snake_case = ite_ternary_search(collection, target)
_snake_case = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("Not found")
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args_into_dataclasses()[0]
lowerCamelCase__ = TensorFlowBenchmark(args=_a )
try:
lowerCamelCase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCamelCase__ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
lowerCamelCase__ = ' '.join(str(_a ).split(' ' )[:-1] )
lowerCamelCase__ = ''
lowerCamelCase__ = eval(str(_a ).split(' ' )[-1] )
lowerCamelCase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_a )
if len(_a ) > 0:
lowerCamelCase__ = full_error_msg + begin_error_msg + str(_a )
raise ValueError(_a )
benchmark.run()
if __name__ == "__main__":
main()
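# Editor's note: a hedged sketch of driving the benchmark programmatically
# instead of through argv. The argument fields below follow the transformers
# benchmark documentation and should be treated as assumptions if your
# version differs.
def run_programmatic_benchmark():
    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )
    return TensorFlowBenchmark(args=args).run()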
| 659 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
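# Editor's note: a quick sanity check of the derived channel dimension above;
# with the default embed_dim of 96 and four stages, the hidden size doubles
# per stage to 96 * 2 ** 3 = 768. `hidden_size` is the attribute name used by
# the released Swinv2Config, assumed here to match the obfuscated assignment.
_cfg = Swinv2Config()
assert _cfg.hidden_size == 96 * 2 ** (len(_cfg.depths) - 1) == 768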
| 659 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
    # When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
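# Editor's note: a worked example of the stats parsing above, on a made-up
# pytest summary line. The counts come from the tokens preceding "failed" and
# "passed", and the trailing token is kept as the time spent.
_failed, _success, _spent = handle_test_results("2 failed, 10 passed in 3.45s")
assert (_failed, _success, _spent) == (2, 10, "3.45s")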
| 659 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
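# Editor's note: the same Project Euler 20 result in one line via the standard
# library; the digits of 100! sum to 648.
import math

assert sum(int(digit) for digit in str(math.factorial(100))) == 648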
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
    def __init__( self : List[Any] , *, SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
            # reformat the cluster to be a list of dicts
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
        for elementb in extremes:
            lowerCamelCase__ = _shared_dataset[elementb['base_index']]['content']
            if jaccard_similarity(_a , _a ) >= jaccard_threshold:
                elementb["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
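# Editor's note: a small sketch of the MinHash estimate this pipeline leans on.
# Two near-identical token sets should score close to their exact Jaccard
# similarity; the tolerance below is loose because MinHash is probabilistic.
from datasketch import MinHash as _MinHash

def _sketch(tokens: set) -> _MinHash:
    m = _MinHash(num_perm=NUM_PERM)
    for token in tokens:
        m.update(token.encode())
    return m

_left = set("def foo ( x ) : return x + 1".split())
_right = set("def foo ( x ) : return x + 2".split())
_exact = len(_left & _right) / len(_left | _right)
assert abs(_sketch(_left).jaccard(_sketch(_right)) - _exact) < 0.15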
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 0 ):
lowerCamelCase__ , lowerCamelCase__ = row, column
lowerCamelCase__ = [[default_value for c in range(SCREAMING_SNAKE_CASE__ )] for r in range(SCREAMING_SNAKE_CASE__ )]
def __str__( self : Union[str, Any] ):
lowerCamelCase__ = F'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
lowerCamelCase__ = 0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__ = max(SCREAMING_SNAKE_CASE__ , len(str(SCREAMING_SNAKE_CASE__ ) ) )
lowerCamelCase__ = F'%{max_element_length}s'
# Make string and return
def single_line(SCREAMING_SNAKE_CASE__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCamelCase__ = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(SCREAMING_SNAKE_CASE__ ) for row_vector in self.array )
return s
def __repr__( self : Any ):
return str(self )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : tuple[int, int] ):
if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and len(SCREAMING_SNAKE_CASE__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , SCREAMING_SNAKE_CASE__ : tuple[int, int] ):
        assert self.validate_indices(SCREAMING_SNAKE_CASE__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : float ):
        assert self.validate_indices(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = value
def __add__( self : int , SCREAMING_SNAKE_CASE__ : Matrix ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCamelCase__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ = self[r, c] + another[r, c]
return result
def __neg__( self : Dict ):
lowerCamelCase__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ = -self[r, c]
return result
def __sub__( self : Tuple , SCREAMING_SNAKE_CASE__ : Matrix ):
return self + (-another)
def __mul__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int | float | Matrix ):
if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): # Scalar multiplication
lowerCamelCase__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ = self[r, c] * another
return result
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Matrix multiplication
assert self.column == another.row
lowerCamelCase__ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCamelCase__ = F'Unsupported type given for another ({type(SCREAMING_SNAKE_CASE__ )})'
raise TypeError(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ = self[r, c]
return result
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Matrix , SCREAMING_SNAKE_CASE__ : Matrix ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        assert self.row == self.column == u.row == v.row # self must be square and match u, v in row count
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCamelCase__ = v.transpose()
lowerCamelCase__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def snake_case ( )-> None:
'''simple docstring'''
lowerCamelCase__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__ = 1
print(F'a^(-1) is {ainv}' )
# u, v
lowerCamelCase__ = Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1, 2, -3
lowerCamelCase__ = Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 4, -2, 5
print(F'u is {u}' )
print(F'v is {v}' )
print(F'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(_a , _a )}' )
def snake_case ( )-> None:
'''simple docstring'''
import doctest
doctest.testmod()
testa()
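# Editor's note: the update computed by sherman_morrison above is the
# Sherman-Morrison identity, with `self` playing the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# A numpy cross-check on the same u, v used in the test, with A = I:
import numpy as np

_ainv = np.eye(3)
_u = np.array([[1.0], [2.0], [-3.0]])
_v = np.array([[4.0], [-2.0], [5.0]])
_factor = 1.0 + (_v.T @ _ainv @ _u).item()  # here 1 + v^T u = -14, nonzero
_update = _ainv - (_ainv @ _u) @ (_v.T @ _ainv) / _factor
assert np.allclose(_update, np.linalg.inv(np.eye(3) + _u @ _v.T))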
| 659 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
    # When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
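        # the GitHub jobs API returns at most 100 entries per page, so page through the remainder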
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
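            # route the failing test into its doc-test category by matching the file pattern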
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = VideoToVideoSDPipeline
a_ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
a_ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
a_ : int = PipelineTesterMixin.required_optional_params - {'latents'}
a_ : List[str] = False
# No `output_type`.
a_ : List[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def _UpperCamelCase ( self : int ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=0 ):
# 3 frames
lowerCamelCase__ = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = VideoToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'np'
lowerCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames
lowerCamelCase__ = frames[0][-3:, -3:, -1]
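        # spot-check determinism on a 3x3 corner slice of the first frame's last channel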
assert frames[0].shape == (32, 32, 3)
lowerCamelCase__ = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _UpperCamelCase ( self : str ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _UpperCamelCase ( self : Any ):
pass
def _UpperCamelCase ( self : str ):
return super().test_progress_bar()
@slow
@skip_mps
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
lowerCamelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ = torch.randn((1, 10, 3, 10_24, 5_76) , generator=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = video.to('cuda' )
lowerCamelCase__ = 'Spiderman is surfing'
lowerCamelCase__ = pipe(SCREAMING_SNAKE_CASE__ , video=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=3 , output_type='pt' ).frames
lowerCamelCase__ = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 659 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
            # For classifier-free guidance we would otherwise need two forward passes.
            # Instead, the unconditional and image embeddings are concatenated into a
            # single batch so that one forward pass covers both branches.
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
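        # classifier-free guidance blends the two branches of the prediction:
        #   pred = uncond + guidance_scale * (cond - uncond); a scale <= 1.0 disables it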
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
            raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 659 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[Any] , **SCREAMING_SNAKE_CASE__ : str ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = {}
lowerCamelCase__ = {}
lowerCamelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCamelCase__ = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
lowerCamelCase__ = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
lowerCamelCase__ = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
lowerCamelCase__ = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
lowerCamelCase__ = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCamelCase__ = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
lowerCamelCase__ = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
lowerCamelCase__ = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
lowerCamelCase__ = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
lowerCamelCase__ = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
lowerCamelCase__ = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
lowerCamelCase__ = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , *SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : str ):
return super().__call__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : float = 5_12 / 15_00 , SCREAMING_SNAKE_CASE__ : Optional[int] = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , ):
lowerCamelCase__ = load_image(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_processor.size['longest_edge']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.generate_crop_boxes(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
lowerCamelCase__ = self.get_inference_context()
with inference_context():
lowerCamelCase__ = self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE__ , device=self.device )
lowerCamelCase__ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
lowerCamelCase__ = image_embeddings
lowerCamelCase__ = grid_points.shape[1]
lowerCamelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
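        # yield the point grid in chunks of points_per_batch so a single forward pass never
        # has to score every prompt point at once, bounding peak GPU memory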
for i in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = grid_points[:, i : i + points_per_batch, :, :]
lowerCamelCase__ = input_labels[:, i : i + points_per_batch]
lowerCamelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=0.88 , SCREAMING_SNAKE_CASE__ : Any=0.95 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Dict=1 , ):
lowerCamelCase__ = model_inputs.pop('input_boxes' )
lowerCamelCase__ = model_inputs.pop('is_last' )
lowerCamelCase__ = model_inputs.pop('original_sizes' ).tolist()
lowerCamelCase__ = model_inputs.pop('reshaped_input_sizes' ).tolist()
lowerCamelCase__ = self.model(**SCREAMING_SNAKE_CASE__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCamelCase__ = model_outputs['pred_masks']
lowerCamelCase__ = self.image_processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , binarize=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_outputs['iou_scores']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.7 , ):
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ )
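        # overlapping crops yield duplicate masks; NMS over the predicted boxes keeps one copy of each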
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.post_process_for_mask_generation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {}
if output_rle_mask:
lowerCamelCase__ = rle_mask
if output_bboxes_mask:
lowerCamelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 659 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
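        # legacy NLLB appended the language code after </s>; the corrected default prefixes it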
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 659 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = 'sew-d'
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Tuple=7_68 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : List[str]=2_56 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : Optional[int]="layer_norm" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-7 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : str="group" , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE__ : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_28 , SCREAMING_SNAKE_CASE__ : Dict=16 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Tuple=0.05 , SCREAMING_SNAKE_CASE__ : Dict=10 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[int]=2_56 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : Dict=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = feat_extract_norm
lowerCamelCase__ = feat_extract_activation
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = conv_bias
lowerCamelCase__ = num_conv_pos_embeddings
lowerCamelCase__ = num_conv_pos_embedding_groups
lowerCamelCase__ = len(self.conv_dim )
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = squeeze_factor
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = position_buckets
lowerCamelCase__ = share_att_key
lowerCamelCase__ = relative_attention
lowerCamelCase__ = norm_rel_ebd
lowerCamelCase__ = list(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = feat_proj_dropout
lowerCamelCase__ = final_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = feature_layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '
                F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ = apply_spec_augment
lowerCamelCase__ = mask_time_prob
lowerCamelCase__ = mask_time_length
lowerCamelCase__ = mask_time_min_masks
lowerCamelCase__ = mask_feature_prob
lowerCamelCase__ = mask_feature_length
lowerCamelCase__ = mask_feature_min_masks
# ctc loss
lowerCamelCase__ = ctc_loss_reduction
lowerCamelCase__ = ctc_zero_infinity
# sequence classification
lowerCamelCase__ = use_weighted_layer_sum
lowerCamelCase__ = classifier_proj_size
@property
def _UpperCamelCase ( self : Optional[int] ):
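        # total stride of the convolutional feature extractor (product of all layer strides)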
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 659 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
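            # force each attention mask to end in padding: positions before start_index stay 1,
            # everything from start_index onwards is zeroed out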
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
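    # install a lazy module proxy so heavy backends are only imported on first attribute access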
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 1 |
"""simple docstring"""
import math
class _a :
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=0 ): # a graph with nodes 0, 1, ..., N-1
lowerCamelCase__ = n
lowerCamelCase__ = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowerCamelCase__ = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = w
def _UpperCamelCase ( self : List[Any] ):
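        # Floyd-Warshall: after the k-th pass, dp[i][j] is the shortest i -> j distance
        # using only intermediate vertices {0, ..., k}; total cost is O(n^3)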
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowerCamelCase__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
return self.dp[u][v]
if __name__ == "__main__":
_snake_case = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 659 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
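        # keep the fast tokenizer's serialized normalizer in sync with the init-time options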
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 659 | 1 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = CustomTokenizer
pass
| 659 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
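    # 2-color each component by DFS (colors 0 and 1); an edge joining two vertices of the
    # same color implies an odd cycle, so the graph cannot be bipartite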
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
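# Minimal usage sketch (our addition, not part of the original module): exercises
# insertion, update, deletion and the automatic resizing behaviour.
if __name__ == "__main__":
    hm = HashMap[str, int]()
    for i, key in enumerate("abcdefghij"):
        hm[key] = i  # _size_up() fires once the load factor exceeds 0.75
    del hm["a"]  # replaced by the _deleted tombstone
    assert hm["b"] == 1 and len(hm) == 9
    assert sorted(hm) == sorted("bcdefghij")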
| 659 | 1 |
"""simple docstring"""
def method_a(boundary, steps):
    """Composite trapezoidal rule over [boundary[0], boundary[1]]."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
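# Quick sanity check (our addition, not in the original script): the exact value
# of the integral of x**2 over [0, 1] is 1/3, so the 10-step trapezoidal estimate
# should land within 1e-2 of it.
assert abs(method_a([0.0, 1.0], 10.0) - 1 / 3) < 1e-2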
if __name__ == "__main__":
main()
| 700 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count ordered combinations of items summing to target."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down recursion with memoization in dp_array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
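# Cross-check (our addition, not in the original file): all three variants must
# agree. For target 5 over items [1, 2, 5] there are 9 ordered combinations,
# since different orderings count separately.
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)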
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 659 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_snake_case = "Usage of script: script_name <size_of_canvas:int>"
_snake_case = [0] * 100 + [1] * 10
random.shuffle(choice)
def snake_case ( _a: Optional[int] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = [[False for i in range(__A )] for j in range(__A )]
return canvas
def snake_case ( _a: Any )-> Dict:
'''simple docstring'''
for i, row in enumerate(__A ):
for j, _ in enumerate(__A ):
lowerCamelCase__ = bool(random.getrandbits(1 ) )
def snake_case ( _a: Tuple )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = np.array(__A )
lowerCamelCase__ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__A ):
for c, pt in enumerate(__A ):
lowerCamelCase__ = __judge_point(
__A , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowerCamelCase__ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowerCamelCase__ = current_canvas.tolist()
return return_canvas
def snake_case ( _a: Union[str, Any] , _a: Union[str, Any] )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
lowerCamelCase__ = pt
if pt:
if alive < 2:
lowerCamelCase__ = False
elif alive == 2 or alive == 3:
lowerCamelCase__ = True
elif alive > 3:
lowerCamelCase__ = False
else:
if alive == 3:
lowerCamelCase__ = True
return state
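# Headless smoke test (our addition): a vertical "blinker" oscillates with period
# 2 under Conway's rules, so two applications of run() must restore the starting
# pattern. It executes before the interactive loop below and needs no display.
_blinker = create_canvas(5)
for _row in range(1, 4):
    _blinker[_row][2] = True
assert run(run(_blinker)) == _blinker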
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 702 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 659 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
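# Worked example (our addition, not in the original script): 8051 = 83 * 97 is the
# demonstration value from Pollard's original paper; the default seed and step find
# a factor quickly, while a prime such as 17 exhausts all attempts and yields None.
assert pollard_rho(8051) in (83, 97)
assert pollard_rho(17) is None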
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
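# Consistency check (our addition, not in the original file): on distinct values
# tree_sort must agree with the built-in sorted(); duplicates are dropped by
# insert(), hence random.sample() rather than arbitrary input.
import random

_sample = random.sample(range(100), 15)
assert tree_sort(_sample) == sorted(_sample)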
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
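# Usage sketch (our addition): column_mapping drives automatic column renaming
# when a dataset is prepared for the summarization task. Guarded so that it only
# runs when the module is executed directly.
if __name__ == "__main__":
    _example = Summarization(text_column="article", summary_column="highlights")
    assert _example.column_mapping == {"article": "text", "highlights": "summary"}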
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 0 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 706 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
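# Cross-check (our addition, not in the original file): the even Fibonacci numbers
# not exceeding 100 are 2, 8 and 34, so solution(100) must be 44.
assert solution(100) == 44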
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all (optionally empty) subarrays."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
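# Edge-case illustration (our addition, not in the original file): with an
# all-negative input the classic Kadane answer is the largest single element,
# while allow_empty_subarrays=True permits the empty subarray and returns 0.
assert max_subarray_sum([-3, -1, -2]) == -1
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0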
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 707 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Breadth-first search that records the augmenting path in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant: augment along shortest residual paths until none remain."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
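# Sanity check (our addition, not in the original file): the network above is the
# classic CLRS example whose maximum flow is 23. ford_fulkerson mutates its
# capacity matrix into the residual network, so the check rebuilds the graph.
_fresh = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(_fresh, 0, 5) == 23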
| 659 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowerCamelCase__ = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowerCamelCase__ = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowerCamelCase__ = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowerCamelCase__ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ = '''1'''
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowerCamelCase__ = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowerCamelCase__ = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowerCamelCase__ = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowerCamelCase__ = self.get_env()
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowerCamelCase__ = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowerCamelCase__ = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowerCamelCase__ = self.get_env()
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ = '''1'''
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = '''
from transformers import pipeline
'''
lowerCamelCase__ = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowerCamelCase__ = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowerCamelCase__ = self.get_env()
lowerCamelCase__ = '''1'''
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = '''
from transformers import AutoModel
'''
lowerCamelCase__ = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowerCamelCase__ = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowerCamelCase__ = self.get_env()
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ = '''1'''
lowerCamelCase__ = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate batch_size completions per task and post-process them."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import os
import string
import sys
_snake_case = 1 << 8
_snake_case = {
'tab': ord("\t"),
'newline': ord("\r"),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
_snake_case = KEYMAP['up']
_snake_case = KEYMAP['left']
if sys.platform == "win32":
_snake_case = []
_snake_case = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
_snake_case = ord(str(i))
def snake_case ( )-> Tuple:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCamelCase__ = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__lowerCAmelCase ) == 0:
# Read the keystroke
lowerCamelCase__ = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCamelCase__ = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCamelCase__ = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(__lowerCAmelCase )
if ord(__lowerCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCamelCase__ = chr(KEYMAP['esc'] )
except KeyError:
lowerCamelCase__ = cha[1]
else:
lowerCamelCase__ = ch.decode(__lowerCAmelCase )
else:
lowerCamelCase__ = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCamelCase__ = sys.stdin.fileno()
lowerCamelCase__ = termios.tcgetattr(__lowerCAmelCase )
try:
tty.setraw(__lowerCAmelCase )
lowerCamelCase__ = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCAmelCase , termios.TCSADRAIN , __lowerCAmelCase )
return ch
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = get_raw_chars()
if ord(__lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCAmelCase ) == KEYMAP["esc"]:
lowerCamelCase__ = get_raw_chars()
if ord(__lowerCAmelCase ) == KEYMAP["mod_int"]:
lowerCamelCase__ = get_raw_chars()
if ord(__lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
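# Usage sketch (our addition), left as a comment because get_character() blocks on
# stdin when executed:
#
#   char = get_character()
#   if char in (chr(KEYMAP["up"]), chr(KEYMAP["down"])):
#       ...  # e.g. move a menu cursor up or down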
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_snake_case = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
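# Illustrative usage (not part of this module; the directory layout is an assumption):
#
#   from datasets import load_dataset
#
#   # my_audio/train/<label>/<file>.wav
#   ds = load_dataset("audiofolder", data_dir="my_audio")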
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
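# Illustrative note (not part of the original file): with this lazy-module setup,
# end users import classes at the top level, e.g.
#
#   from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
#
# and the heavy tokenization/modeling submodules are only loaded on first access.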
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
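# Quick sanity check (illustrative, not part of the original script): a uniform
# distribution over four outcomes has entropy ln(4), about 1.386 nats.
#
#   entropy(torch.full((4,), 0.25))  # -> tensor(1.3863)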
def print_ad_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores, as described in
    Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the
    score drops below a threshold of the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and measure quality and speed."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1,
        help="If > 0: limit the data to a subset of data_subset instances.",
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true",
        help="Whether to overwrite data in output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true",
        help="Don't normalize importance score by layers",
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true",
        help="Whether to try to mask head until a threshold of accuracy.",
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float,
        help="Amount to heads to masking at each masking step.",
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
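# Example invocation (illustrative; the script filename is an assumption, and
# --data_dir must point to a plain-text file of token ids readable by np.loadtxt):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir token_ids.txt \
#       --output_dir ./pruned \
#       --try_masking --masking_threshold 0.9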
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
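# Illustrative usage (not part of the original file):
#
#   config = Swinv2Config(image_size=256, window_size=8)
#   config.hidden_size  # 768 = 96 * 2 ** (len(depths) - 1)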
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
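# Illustrative usage outside the tests (the checkpoint name is an assumption):
#
#   from transformers import CLIPSegProcessor
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=[image], return_tensors="pt")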
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! (the factorial of num)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
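# Worked example (illustrative): solution(10) computes 10! = 3628800, whose digits
# sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#
#   assert solution(10) == 27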
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
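# Illustrative usage of the API exercised above (the path is an assumption):
#
#   lock = FileLock("/tmp/my_job.lock")
#   with lock.acquire(timeout=5):
#       ...  # critical section; a second holder raises Timeout after 5 s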
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index; the new key joins the cluster of its first close
        duplicate, or starts a new cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset using MinHash LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
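# Worked example (illustrative, not in the original script): with the NON_ALPHA
# tokenizer above, "a = 1" and "a = 2" share {"a"} out of {"a", "1", "2"},
# so the similarity is 1/3.
#
#   assert abs(jaccard_similarity("a = 1", "a = 2") - 1 / 3) < 1e-9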
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": every element of the original cluster is
    similar to at least one kept element."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in parallel over all clusters."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates, keeping one "extreme" per cluster of duplicates."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
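# Illustrative usage (the dataset name and column layout are assumptions): each
# record needs "content", "repo_name" and "path" fields, as the helpers above expect.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)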
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(config, state_dict):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    """Fetch a test image of two cats to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our SegFormer structure."""
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
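# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade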
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
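# A minimal, self-contained sketch of the categorization step above: pytest's
# short-summary lines are matched against each glob pattern to decide which
# doc-test category a failure belongs to. The sample line and patterns in the
# usage note below are made up for illustration.
def categorize_failures(summary_lines, categories):
    results = {name: [] for name in categories.values()}
    for line in summary_lines:
        if "FAILED" not in line:
            continue
        path = line.replace("FAILED ", "").split()[0]
        file_path = path.split("::")[0] if "::" in path else path
        for pattern, name in categories.items():
            if fnmatch(file_path, pattern):
                results[name].append(path)
                break
    return results

# categorize_failures(["FAILED docs/quicktour.md::line_42"], {"*.py": "API Examples", "*.md": "MD Examples"})
# -> {"API Examples": [], "MD Examples": ["docs/quicktour.md::line_42"]}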
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
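# A toy demonstration (hypothetical values) of the masking convention used by
# prepare_blenderbot_inputs_dict: positions holding the pad token receive 0 in
# the attention mask, all other positions receive 1.
def _demo_padding_mask():
    pad_token_id = 1
    input_ids = np.array([[5, 7, 9, 2], [6, 8, 2, 1]])  # second row ends with padding
    attention_mask = np.where(input_ids != pad_token_id, 1, 0)
    assert attention_mask.tolist() == [[1, 1, 1, 1], [1, 1, 1, 0]]
    return attention_mask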
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
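# A small sketch of the position-id bookkeeping exercised by the two cache checks
# above: the prefix pass uses absolute positions 0..n-2 for every row of the batch,
# and the single-token pass then continues at absolute position n-1. Sizes below
# are illustrative only.
def _demo_cache_position_ids(batch_size=2, seq_len=5):
    prefix_positions = jnp.broadcast_to(
        jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
    )  # [[0, 1, 2, 3], [0, 1, 2, 3]]
    last_position = jnp.array(batch_size * [[seq_len - 1]], dtype="i4")  # [[4], [4]]
    return prefix_positions, last_position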
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
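# A NumPy reference sketch of the shift_tokens_right contract verified above (the
# real implementation lives in modeling_flax_blenderbot; this only mirrors the
# behavior the assertions rely on): tokens move one slot to the right, position 0
# receives the decoder start token, and -100 placeholders become the pad token.
def _shift_tokens_right_reference(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)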
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2 )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
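# A minimal sketch of the classifier-free-guidance update used in the denoising
# loop above: the batch carries the unconditional and conditional predictions
# stacked along dim 0, so a single chunk(2) recovers both halves. Tensor shapes
# here are arbitrary stand-ins.
def _demo_classifier_free_guidance(guidance_scale=4.0):
    noise_pred = torch.randn(2, 8, 16)  # [uncond | cond] stacked on dim 0
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)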
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
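# A hedged sketch of the metric-extraction pattern above: TrainingJobAnalytics
# yields a dataframe with `metric_name`/`value` columns that is filtered once per
# metric of interest. The dataframe is passed in, so no SageMaker session is needed.
def _demo_extract_metric(result_metrics_df, metric_name="eval_accuracy"):
    return list(result_metrics_df[result_metrics_df.metric_name == metric_name]["value"])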
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
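# A hedged usage sketch for the language-code machinery above (downloads the
# published checkpoint from the Hub, so network access is assumed):
def _demo_nllb_translation_inputs():
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    # With the default (non-legacy) behaviour, input_ids start with the eng_Latn
    # language code and end with </s>, as arranged by set_src_lang_special_tokens.
    return tokenizer("Hello world", return_tensors="pt")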
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
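# A toy illustration of how the per-residue index maps built above are consumed:
# dense 14-slot atom data is gathered from the sparse 37-slot layout with plain
# index gathering. All shapes and values here are made up for demonstration.
def _demo_gather_atom_positions():
    num_res = 3
    atom37_positions = torch.randn(num_res, 37, 3)
    residx_atom14_to_atom37 = torch.zeros(num_res, 14, dtype=torch.long)  # toy map: everything -> slot 0
    atom14_positions = torch.gather(
        atom37_positions, 1, residx_atom14_to_atom37[..., None].expand(-1, -1, 3)
    )
    return atom14_positions  # shape (num_res, 14, 3)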
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
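# A NumPy sketch of the half-padded masks built in prepare_config_and_inputs above:
# each row keeps a random-length prefix of ones and zeroes out the remainder,
# mimicking right-padded batches. Sizes are illustrative.
def _demo_half_padded_mask(batch_size=2, seq_length=6, seed=0):
    rng = np.random.default_rng(seed)
    mask = np.zeros((batch_size, seq_length), dtype=np.int64)
    start_indices = rng.integers(1, seq_length - 1, size=(batch_size,))
    for batch_idx, start_index in enumerate(start_indices):
        mask[batch_idx, :start_index] = 1
    return mask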
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.dummy_cond_unet_upscale
lowerCamelCase__ = DDPMScheduler()
lowerCamelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase__ = self.dummy_vae
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase__ = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
lowerCamelCase__ = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ = '''A painting of a squirrel eating a burger'''
lowerCamelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase__ = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase__ = output.images
lowerCamelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase__ = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase__ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.dummy_cond_unet_upscale
lowerCamelCase__ = DDPMScheduler()
lowerCamelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase__ = self.dummy_vae
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase__ = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
lowerCamelCase__ = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ = '''A painting of a squirrel eating a burger'''
lowerCamelCase__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase__ = output.images
assert image.shape[0] == 2
lowerCamelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCamelCase__ = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.dummy_cond_unet_upscale
lowerCamelCase__ = DDPMScheduler()
lowerCamelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase__ = self.dummy_vae
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase__ = unet.half()
lowerCamelCase__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase__ = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
lowerCamelCase__ = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ = '''A painting of a squirrel eating a burger'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='np' , ).images
lowerCamelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
lowerCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ = '''a cat sitting on a park bench'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
lowerCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ = '''a cat sitting on a park bench'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _UpperCamelCase ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ = '''a cat sitting on a park bench'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type='np' , )
lowerCamelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
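# A hedged sketch of the peak-memory measurement pattern used in the test above:
# reset the CUDA allocator statistics, run the workload, then read the high-water
# mark. Requires a CUDA device; the matmul below is a stand-in workload.
def _demo_measure_peak_cuda_memory():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    return torch.cuda.max_memory_allocated()  # bytes at the peak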
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
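# A minimal sketch of the recommended replacement (assumes the public
# `runwayml/stable-diffusion-v1-5` checkpoint and an existing PIL `init_image`;
# not part of the original script):
#
#   from diffusers import StableDiffusionImg2ImgPipeline
#
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe = pipe.to("cuda")
#   result = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75)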
| 659 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class _a ( _UpperCAmelCase ):
a_ : int = 'deberta-v2'
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=12_81_00 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : Tuple=24 , SCREAMING_SNAKE_CASE__ : Optional[int]=61_44 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : str=1e-7 , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : str="gelu" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
super().__init__(**__UpperCamelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = relative_attention
lowerCamelCase__ = max_relative_positions
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = position_biased_input
# Backwards compatibility
if type(__UpperCamelCase ) == str:
lowerCamelCase__ = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCamelCase__ = pos_att_type
lowerCamelCase__ = vocab_size
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = kwargs.get('pooler_hidden_size' , __UpperCamelCase )
lowerCamelCase__ = pooler_dropout
lowerCamelCase__ = pooler_hidden_act
class _a ( _UpperCAmelCase ):
@property
def _UpperCamelCase ( self : int ):
if self.task == "multiple-choice":
lowerCamelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def _UpperCamelCase ( self : Union[str, Any] ):
return 12
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ):
lowerCamelCase__ = super().generate_dummy_inputs(preprocessor=__UpperCamelCase , framework=__UpperCamelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
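# Usage sketch for the ONNX config above (hedged: `DebertaV2Config`,
# `DebertaV2OnnxConfig` and `AutoTokenizer` are the upstream names of the
# obfuscated classes in this file):
#
#   from transformers import AutoTokenizer, DebertaV2Config
#
#   config = DebertaV2Config.from_pretrained('microsoft/deberta-v2-xlarge')
#   tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-v2-xlarge')
#   onnx_config = DebertaV2OnnxConfig(config)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer)
#   # `token_type_ids` is present only when config.type_vocab_size > 0,
#   # matching the `inputs` property above.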
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
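# Worked example for the Funnel-specific token type ids (sketch;
# `create_token_type_ids_from_sequences` is the upstream name of the second
# method above): unlike BERT, the [CLS] token gets its own type id
# (`cls_token_type_id = 2`), so for token_ids_a = [5, 6] and token_ids_b = [7]:
#
#   create_token_type_ids_from_sequences([5, 6])       -> [2, 0, 0, 0]
#   create_token_type_ids_from_sequences([5, 6], [7])  -> [2, 0, 0, 0, 1, 1]
#
# one `2` for [CLS], zeros for the first segment plus its [SEP], and ones for
# the second segment plus its [SEP].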
| 659 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
_snake_case = get_logger(__name__)
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
lowerCamelCase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , snake_case_ , getattr(snake_case_ , snake_case_ ) )
lowerCamelCase__ = module._original_module if isinstance(snake_case_ , _PatchedModuleObj ) else module
class _a :
a_ : Any = []
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
lowerCamelCase__ = obj
lowerCamelCase__ = target
lowerCamelCase__ = new
lowerCamelCase__ = target.split('.' )[0]
lowerCamelCase__ = {}
lowerCamelCase__ = attrs or []
def __enter__( self : int ):
*lowerCamelCase__ , lowerCamelCase__ = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(snake_case_ ) ):
try:
lowerCamelCase__ = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
lowerCamelCase__ = getattr(self.obj , snake_case_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(snake_case_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
lowerCamelCase__ = obj_attr
# patch at top level
setattr(self.obj , snake_case_ , _PatchedModuleObj(snake_case_ , attrs=self.attrs ) )
lowerCamelCase__ = getattr(self.obj , snake_case_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(snake_case_ , snake_case_ , _PatchedModuleObj(getattr(snake_case_ , snake_case_ , snake_case_ ) , attrs=self.attrs ) )
lowerCamelCase__ = getattr(snake_case_ , snake_case_ )
# finally set the target attribute
setattr(snake_case_ , snake_case_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowerCamelCase__ = getattr(import_module('.'.join(snake_case_ ) ) , snake_case_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , snake_case_ ) is attr_value:
lowerCamelCase__ = getattr(self.obj , snake_case_ )
setattr(self.obj , snake_case_ , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
lowerCamelCase__ = globals()['__builtins__'][target_attr]
setattr(self.obj , snake_case_ , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : Dict , *SCREAMING_SNAKE_CASE__ : Dict ):
for attr in list(self.original ):
setattr(self.obj , snake_case_ , self.original.pop(snake_case_ ) )
def _UpperCamelCase ( self : Dict ):
self.__enter__()
self._active_patches.append(self )
def _UpperCamelCase ( self : Union[str, Any] ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
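# Usage sketch for the context-manager patcher above (hedged: `patch_submodule`
# is the upstream name of the second class, and `my_module` is hypothetical):
#
#   import my_module  # some module that calls os.path.join internally
#
#   def mock_join(*args):
#       return '/'.join(args)
#
#   with patch_submodule(my_module, 'os.path.join', mock_join):
#       ...  # my_module sees the mock, even if it did `from os.path import join`
#   # on exit, __exit__ restores every attribute saved in self.original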
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
                dfs(u , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
            dfs(i , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
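# A quick counterexample (sketch): any odd cycle is not bipartite, because a
# 2-coloring must fail somewhere along it. For the triangle below, vertex 0 is
# colored 0, vertex 1 gets 1, vertex 2 gets 0 again, and the edge (0, 2) then
# joins two vertices of the same color, so the final edge check returns False:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False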
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case ( _a: int , _a: int )-> Dict:
'''simple docstring'''
if b == 0:
return (1, 0)
    lowerCamelCase__ , lowerCamelCase__ = extended_euclid(__UpperCamelCase , a % b )
lowerCamelCase__ = a // b
return (y, x - k * y)
def snake_case ( _a: int , _a: int , _a: int , _a: int )-> int:
'''simple docstring'''
    lowerCamelCase__ , lowerCamelCase__ = extended_euclid(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase__ = na * na
lowerCamelCase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def snake_case ( _a: int , _a: int )-> Optional[Any]:
'''simple docstring'''
    lowerCamelCase__ , lowerCamelCase__ = extended_euclid(__UpperCamelCase , __UpperCamelCase )
if b < 0:
lowerCamelCase__ = (b % n + n) % n
return b
def snake_case ( _a: int , _a: int , _a: int , _a: int )-> Union[str, Any]:
'''simple docstring'''
    lowerCamelCase__ , lowerCamelCase__ = invert_modulo(__UpperCamelCase , __UpperCamelCase ), invert_modulo(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase__ = na * na
lowerCamelCase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
        super().__init__(None , None )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
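# Usage sketch (hedged: `HashMap` is the upstream name of the class above).
# Deletion writes the `_deleted` tombstone so later probes keep walking past
# the slot instead of stopping at an apparently empty bucket:
#
#   hm = HashMap(initial_block_size=8)
#   hm['a'] = 1
#   hm['b'] = 2     # on collision, __setitem__ probes linearly via _get_next_ind
#   del hm['a']     # the slot now holds _deleted; len(hm) == 1
#   hm['b']         # still found: __getitem__ skips tombstones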
| 659 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _a ( _lowercase ):
a_ : Dict = '''MCTCTFeatureExtractor'''
a_ : Optional[int] = '''AutoTokenizer'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ):
super().__init__(A_ , A_ )
lowerCamelCase__ = self.feature_extractor
lowerCamelCase__ = False
def __call__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_ , **A_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ = kwargs.pop('audio' , A_ )
lowerCamelCase__ = kwargs.pop('sampling_rate' , A_ )
lowerCamelCase__ = kwargs.pop('text' , A_ )
if len(A_ ) > 0:
lowerCamelCase__ = args[0]
lowerCamelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ = self.feature_extractor(A_ , *A_ , sampling_rate=A_ , **A_ )
if text is not None:
lowerCamelCase__ = self.tokenizer(A_ , **A_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ = encodings['input_ids']
return inputs
def _UpperCamelCase ( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return self.tokenizer.batch_decode(*A_ , **A_ )
def _UpperCamelCase ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A_ , **A_ )
lowerCamelCase__ = kwargs.pop('input_features' , A_ )
lowerCamelCase__ = kwargs.pop('labels' , A_ )
if len(A_ ) > 0:
lowerCamelCase__ = args[0]
lowerCamelCase__ = args[1:]
if input_features is not None:
lowerCamelCase__ = self.feature_extractor.pad(A_ , *A_ , **A_ )
if labels is not None:
lowerCamelCase__ = self.tokenizer.pad(A_ , **A_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCamelCase__ = labels['input_ids']
return input_features
def _UpperCamelCase ( self : Tuple , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ):
return self.tokenizer.decode(*A_ , **A_ )
@contextmanager
def _UpperCamelCase ( self : Any ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ = True
lowerCamelCase__ = self.tokenizer
yield
lowerCamelCase__ = self.feature_extractor
lowerCamelCase__ = False
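# Usage sketch (hedged: assumes the upstream `MCTCTProcessor` class name, an
# existing 'speechbrain/m-ctc-t-large' checkpoint, and a `raw_speech` array):
#
#   processor = MCTCTProcessor.from_pretrained('speechbrain/m-ctc-t-large')
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors='pt')
#   labels = processor(text='hello world').input_ids   # routed to the tokenizer
#
# Passing `audio` and `text` in one call returns the audio features with the
# tokenized ids attached as the `labels` entry (the final branch of __call__).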
| 700 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
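# Worked check (sketch): for n = 3, array = [1, 2, 5] and target = 5 all three
# variants count 9 ordered combinations, since f(t) = f(t-1) + f(t-2) + f(t-5)
# with f(0) = 1 gives f(1)=1, f(2)=2, f(3)=3, f(4)=5, f(5)=9. The plain
# recursion revisits these subtargets exponentially often; the memoized and
# bottom-up variants are both O(n * target) time with O(target) extra space.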
| 659 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _a :
a_ : Union[str, Any] = MBartConfig
a_ : Dict = {}
a_ : List[str] = 'gelu'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=13 , SCREAMING_SNAKE_CASE__ : List[str]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Any=37 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=20 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : int=0 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = eos_token_id
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase__ = prepare_mbart_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = TFMBartModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder()
lowerCamelCase__ = inputs_dict['input_ids']
lowerCamelCase__ = input_ids[:1, :]
lowerCamelCase__ = inputs_dict['attention_mask'][:1, :]
lowerCamelCase__ = inputs_dict['head_mask']
lowerCamelCase__ = 1
# first forward pass
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = outputs.to_tuple()
lowerCamelCase__ = past_key_values[1]
def snake_case ( _a: int , _a: Tuple , _a: Union[str, Any] , _a: Union[str, Any]=None , _a: Optional[int]=None , _a: Optional[int]=None , _a: Optional[Any]=None , _a: List[str]=None , )-> Union[str, Any]:
'''simple docstring'''
if attention_mask is None:
        lowerCamelCase__ = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
lowerCamelCase__ = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
a_ : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
a_ : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
a_ : Any = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ : str = True
a_ : Union[str, Any] = False
a_ : Dict = False
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = TFMBartModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
a_ : List[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
]
a_ : Optional[int] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
a_ : int = 'facebook/mbart-large-en-ro'
@cached_property
def _UpperCamelCase ( self : List[Any] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _UpperCamelCase ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.translate_src_text(**SCREAMING_SNAKE_CASE__ )
self.assertListEqual(self.expected_text , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
lowerCamelCase__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowerCamelCase__ = self.tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
return generated_words
@slow
def _UpperCamelCase ( self : Optional[int] ):
self._assert_generated_batch_equal_expected()
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
import os
import sys
import transformers
_snake_case = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 702 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
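# Usage sketch (hedged: `procentual_proximity` is the upstream name of the
# driver above, and the data is illustrative). Weight 1 means "higher is
# better", weight 0 means "lower is better"; each attribute is min-max
# normalized, per-attribute scores are summed and appended to every row:
#
#   data = [[20, 60], [23, 90], [22, 50]]
#   result = procentual_proximity(data, [0, 1])
#   # result[i][-1] now holds the combined score of row i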
| 659 | 0 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_snake_case = logging.get_logger(__name__)
class _a :
a_ : int = None
@experimental
def snake_case ( _a: Tuple , _a: Dict , _a: Optional[int] , _a: Optional[Any] , _a: List[Any] , _a: List[str] , _a: Optional[int] )-> Dict:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
return _map_with_joblib(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def snake_case ( _a: Optional[int] , _a: Union[str, Any] , _a: Union[str, Any] , _a: Optional[int] , _a: List[str] , _a: int , _a: int )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = num_proc if num_proc <= len(snake_case_ ) else len(snake_case_ )
    lowerCamelCase__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(snake_case_ ):
lowerCamelCase__ = len(snake_case_ ) // num_proc
lowerCamelCase__ = len(snake_case_ ) % num_proc
lowerCamelCase__ = div * index + min(snake_case_ , snake_case_ )
lowerCamelCase__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(snake_case_ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'Error dividing inputs iterable among processes. '
F'Total number of objects {len(snake_case_ )}, '
F'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
F'Spawning {num_proc} processes for {len(snake_case_ )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    lowerCamelCase__ , lowerCamelCase__ = None, None
if not disable_tqdm:
        lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
with Pool(snake_case_ , initargs=snake_case_ , initializer=snake_case_ ) as pool:
lowerCamelCase__ = pool.map(snake_case_ , snake_case_ )
logger.info(F'Finished {num_proc} processes' )
lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'Unpacked {len(snake_case_ )} objects' )
return mapped
def snake_case ( _a: Optional[int] , _a: str , _a: Any , _a: Union[str, Any] , _a: str , _a: Optional[int] , _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=snake_case_ ):
return joblib.Parallel()(
joblib.delayed(snake_case_ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def snake_case ( _a: str )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ = None
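# Worked example of the contiguous split arithmetic above (sketch): 10 items
# over num_proc = 3 gives div = 3 and mod = 1, so the slice boundaries are
#
#   index 0: start = 0, end = 4   -> 4 items
#   index 1: start = 4, end = 7   -> 3 items
#   index 2: start = 7, end = 10  -> 3 items
#
# the first `mod` workers each take one extra item, and the slices cover the
# iterable exactly once, which the length check above enforces.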
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCamelCase ( self : Dict ):
        lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=True , dtype=jnp.bfloat16 )
        lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=True , dtype=jnp.bfloat16 )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = "bird"
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=True , dtype=jnp.bfloat16 )
        lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=True , dtype=jnp.bfloat16 )
lowerCamelCase__ = controlnet_params
lowerCamelCase__ = "Chef in the kitchen"
lowerCamelCase__ = jax.device_count()
lowerCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCamelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ = jax.random.PRNGKey(0 )
lowerCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCamelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: int )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [[0 for _ in range(_a )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCamelCase__ = 1
for n in range(m + 1 ):
for k in range(1 , _a ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_snake_case = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_snake_case = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 705 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
            raise ValueError('Algorithm is unable to find a solution' )
        else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free path cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case ( _a: list[int | str] )-> int:
'''simple docstring'''
create_state_space_tree(snake_case__ , [] , 0 , [0 for i in range(len(snake_case__ ) )] )
def snake_case ( _a: list[int | str] , _a: list[int | str] , _a: int , _a: list[int] , )-> List[str]:
'''simple docstring'''
if index == len(snake_case__ ):
print(snake_case__ )
return
for i in range(len(snake_case__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
lowerCamelCase__ = True
create_state_space_tree(snake_case__ , snake_case__ , index + 1 , snake_case__ )
current_sequence.pop()
lowerCamelCase__ = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 706 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
_snake_case = None
_snake_case = {
"7B": 1_1008,
"13B": 1_3824,
"30B": 1_7920,
"65B": 2_2016,
"70B": 2_8672,
}
_snake_case = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def snake_case ( _a: int , _a: Optional[int]=1 , _a: List[str]=256 )-> Union[str, Any]:
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
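# Worked example (sketch): for the 7B model, n = 4096, ffn_dim_multiplier = 1
# and multiple_of = 256, so int(8 * 4096 / 3) = 10922, which rounds up to the
# next multiple of 256: ((10922 + 255) // 256) * 256 = 11008, matching the
# "7B": 1_1008 entry in the first table above.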
def snake_case ( _a: Tuple )-> List[str]:
'''simple docstring'''
with open(__snake_case , 'r' ) as f:
return json.load(__snake_case )
def snake_case ( _a: Tuple , _a: Union[str, Any] )-> int:
'''simple docstring'''
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
def snake_case ( _a: List[str] , _a: Dict , _a: Dict , _a: Optional[int]=True )-> List[Any]:
'''simple docstring'''
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase__ = os.path.join(__snake_case , 'tmp' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase__ = read_json(os.path.join(__snake_case , 'params.json' ) )
lowerCamelCase__ = NUM_SHARDS[model_size]
lowerCamelCase__ = params['n_layers']
lowerCamelCase__ = params['n_heads']
lowerCamelCase__ = n_heads // num_shards
lowerCamelCase__ = params['dim']
lowerCamelCase__ = dim // n_heads
lowerCamelCase__ = 10000.0
lowerCamelCase__ = 1.0 / (base ** (torch.arange(0 , __snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCamelCase__ = params['n_kv_heads'] # for GQA / MQA
lowerCamelCase__ = n_heads_per_shard // num_key_value_heads
lowerCamelCase__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCamelCase__ = n_heads
lowerCamelCase__ = n_heads_per_shard
lowerCamelCase__ = dim
# permute for sliced rotary
def permute(_a: Dict , _a: Tuple=n_heads , _a: Any=dim , _a: Union[str, Any]=dim ):
return w.view(__snake_case , dima // n_heads // 2 , 2 , __snake_case ).transpose(1 , 2 ).reshape(__snake_case , __snake_case )
print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCamelCase__ = torch.load(os.path.join(__snake_case , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
lowerCamelCase__ = [
torch.load(os.path.join(__snake_case , F'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(__snake_case )
]
lowerCamelCase__ = 0
lowerCamelCase__ = {'weight_map': {}}
for layer_i in range(__snake_case ):
lowerCamelCase__ = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCamelCase__ = {
F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wq.weight'] ),
F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wk.weight'] ),
F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCamelCase__ = {
F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
F'layers.{layer_i}.attention_norm.weight'
].clone(),
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
F'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
lowerCamelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) )
lowerCamelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case , )
lowerCamelCase__ = torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(__snake_case )] , dim=1 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__snake_case )] , dim=0 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__snake_case )] , dim=1 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__snake_case )] , dim=0 )
lowerCamelCase__ = inv_freq
for k, v in state_dict.items():
lowerCamelCase__ = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
lowerCamelCase__ = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCamelCase__ = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
lowerCamelCase__ = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(__snake_case )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(__snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCamelCase__ = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
# Write configs
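    # total_size counts bytes: each parameter is saved in 16-bit precision (2 bytes per parameter).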
lowerCamelCase__ = {'total_size': param_count * 2}
write_json(__snake_case , os.path.join(__snake_case , 'pytorch_model.bin.index.json' ) )
lowerCamelCase__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
lowerCamelCase__ = params['multiple_of'] if 'multiple_of' in params else 256
lowerCamelCase__ = LlamaConfig(
hidden_size=__snake_case , intermediate_size=compute_intermediate_size(__snake_case , __snake_case , __snake_case ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=__snake_case , )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    lowerCamelCase__ = LlamaForCausalLM.from_pretrained(__snake_case , torch_dtype=torch.float16 , low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(__snake_case , safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def snake_case ( _a: List[str] , _a: Optional[int] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
lowerCamelCase__ = tokenizer_class(__snake_case )
tokenizer.save_pretrained(__snake_case )
def snake_case ( )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=__snake_case , help='Whether or not to save using `safetensors`.' )
lowerCamelCase__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowerCamelCase__ = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , __snake_case )
if __name__ == "__main__":
main()
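# Example invocation (hypothetical script name and paths):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /path/to/hf_model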
| 707 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
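    # Standard BFS over the residual graph: follow only edges with remaining
    # capacity (graph[u][ind] > 0) and record each node's parent so the caller
    # can reconstruct the augmenting path from sink back to source.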
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
        # Find the minimum residual capacity along the path found by BFS
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
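# Using BFS to pick augmenting paths makes this the Edmonds-Karp variant of
# Ford-Fulkerson, which runs in O(V * E^2) regardless of the edge capacities.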
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
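# This is the classic CLRS flow network; the expected maximum flow is 23.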
print(ford_fulkerson(graph, source, sink))
| 659 | 0 |
"""simple docstring"""
import sys
def snake_case ( _a: int )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = len(UpperCAmelCase__ )
lowerCamelCase__ = [[0 for x in range(UpperCAmelCase__ )] for x in range(UpperCAmelCase__ )]
lowerCamelCase__ = [[0 for x in range(UpperCAmelCase__ )] for x in range(UpperCAmelCase__ )]
for chain_length in range(2 , UpperCAmelCase__ ):
for a in range(1 , n - chain_length + 1 ):
lowerCamelCase__ = a + chain_length - 1
lowerCamelCase__ = sys.maxsize
for c in range(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowerCamelCase__ = cost
lowerCamelCase__ = c
return matrix, sol
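# The recurrence implemented above is the textbook matrix-chain one:
#   m[a][b] = min over a <= c < b of m[a][c] + m[c+1][b] + p[a-1] * p[c] * p[b]
# where p is the dimension array and sol[a][b] records the optimal split point c.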
def snake_case ( _a: Optional[Any] , _a: Any , _a: Dict )-> str:
'''simple docstring'''
if i == j:
print('A' + str(UpperCAmelCase__ ) , end=' ' )
else:
print('(' , end=' ' )
        print_optimal_solution(UpperCAmelCase__ , UpperCAmelCase__ , optimal_solution[i][j] )
        print_optimal_solution(UpperCAmelCase__ , optimal_solution[i][j] + 1 , UpperCAmelCase__ )
print(')' , end=' ' )
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [30, 35, 15, 5, 10, 20, 25]
lowerCamelCase__ = len(UpperCAmelCase__ )
    # The matrix dimensions implied by the array above are:
    # 30x35, 35x15, 15x5, 5x10, 10x20, 20x25
lowerCamelCase__ = matrix_chain_order(UpperCAmelCase__ )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(UpperCAmelCase__ , 1 , n - 1 )
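    # For this classic CLRS instance the minimum cost is 15125 scalar
    # multiplications, achieved by the parenthesization ((A1(A2A3))((A4A5)A6)).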
if __name__ == "__main__":
main()
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
    # drop the final stop token and the (possibly empty) trailing text after it
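    # e.g. 'def f():\n    return 1\n\nclass Foo:' -> 'def f():\n    return 1\n'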
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
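    # Each prompt appears n_copies times in the dataloader and every forward pass
    # returns batch_size sequences, so each task ends up with
    # n_copies * batch_size (= args.n_samples in the caller) candidate completions.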
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
    # args.batch_size is really num_return_sequences per forward pass; the DataLoader batch size is 1
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
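        # pass@k estimates the probability that at least one of k sampled
        # completions per task passes that task's unit tests.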
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
    # For some reason the following seems to be necessary at times for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import string
from math import logaa
def snake_case ( _a: str , _a: str )-> int:
'''simple docstring'''
lowerCamelCase__ = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
lowerCamelCase__ = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def snake_case ( _a: str , _a: str )-> tuple[int, int]:
'''simple docstring'''
lowerCamelCase__ = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ = corpus_without_punctuation.split('\n' )
lowerCamelCase__ = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def snake_case ( _a: int , _a: int , _a: Union[str, Any]=False )-> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def snake_case ( _a: int , _a: int )-> float:
'''simple docstring'''
return round(tf * idf , 3 )
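# Minimal usage sketch (assuming the descriptive names term_frequency,
# document_frequency, inverse_document_frequency and tf_idf for the four
# functions above; the values follow directly from their formulas):
#   tf = term_frequency('to', 'To be, or not to be')                  # -> 2
#   df, n = document_frequency('first', 'first line\nsecond line')    # -> (1, 2)
#   print(tf_idf(tf, inverse_document_frequency(df, n)))              # -> 0.602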
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
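# Example invocation (hypothetical script name and paths):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions --gold_data_path nq_dev.gold
# The evaluation set gets one question per line; the gold file gets the
# tab-separated titles of the positive contexts for each question.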
| 659 | 0 |