"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCAmelCase__ ( _UpperCamelCase : Iterable[str] , _UpperCamelCase : int ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
snake_case = iter(_UpperCamelCase )
while True:
snake_case = tuple(itertools.islice(_UpperCamelCase , _UpperCamelCase ) )
if not chunk:
return
yield chunk
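

# A quick sanity check of the helper above:
# list(chunker("ABCDE", 2)) == [("A", "B"), ("C", "D"), ("E",)]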


def prepare_input(dirty: str) -> str:
    """Upper-case the text, strip non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
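
# For example: prepare_input("Hello") == "HELXLO" (the doubled L is split by an X).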


def generate_table(key: str) -> list[str]:
    # I and J are treated identically in the Playfair cipher, so J is omitted
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
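
# For example, generate_table("monarchy") lays out the classic 5x5 square
# (row by row): M O N A R / C H Y B D / E F G I K / L P Q S T / U V W X Z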


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
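

if __name__ == "__main__":
    # A minimal round-trip sketch (the message and key are just examples):
    # decoding an encoded message gives back the prepared plaintext,
    # padding X's included.
    message = encode("Hide the gold in the tree stump", "playfair example")
    print(f"Encoded: {message}")
    print(f"Decoded: {decode(message, 'playfair example')}")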
| 150 | """simple docstring"""
import argparse
import copy


def generate_neighbours(path):
    """
    Build a dictionary mapping each node to a list of [neighbour, distance]
    pairs, read from a whitespace-separated edge file (format shown below).
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
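

# The input file format implied by the parsing above is one edge per line,
# "node_a node_b distance", e.g.:
#   a b 20
#   a c 18
#   b c 10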


def generate_first_solution(path, dict_of_neighbours):
    """Greedily build a starting tour: always hop to the nearest unvisited node."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """
    Build the neighbourhood of a solution by swapping every pair of interior
    nodes; each neighbour carries its total distance as the last list element.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """
    Run tabu search for `iters` iterations with a tabu list of length `size`,
    starting from the greedy first solution.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())


# ===== next file: image processor hub tests (transformers test suite) =====
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_url(self):
        # An image processor config can also be loaded directly from a URL.
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class
        # of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")


# ===== next file: area and surface-area formulas for geometric shapes =====
"""Find the area and surface area of various geometric shapes."""
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere: curved area plus the flat circular base."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula: the area of a triangle from its three side lengths."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')


# ===== next file: lazy-import __init__ for the Nezha model (transformers) =====
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# ===== next file: star-diamond printing demo =====
"""
Print a diamond pattern of stars: an upper pyramid followed by an inverted one.
"""


# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (reversed pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")

    K = 1
    while K:
        user_number = int(input("enter the number, and see the magic: "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")


# ===== next file: lazy-import __init__ for the GIT model (transformers) =====
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# ===== next file: k-nearest-neighbours classifier on the iris data set =====
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load the iris data set and split it into train and test partitions
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Give the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify the point using the k-nearest-neighbours algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
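    # With these sepal/petal measurements the point sits deep inside the
    # setosa cluster, so this demo is expected to print "setosa" (the exact
    # train/test split is random, but the nearest neighbours should agree).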


# ===== next file: lazy-import __init__ for the TrOCR model (transformers) =====
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# ===== next file: minimum number of perfect squares summing to n =====
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`,
    using dynamic programming over all smaller values.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
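
    # Quick sanity checks: 25 = 5**2 needs one square; 37 = 36 + 1 needs two.
    print(minimum_squares_to_represent_a_number(25))  # 1
    print(minimum_squares_to_represent_a_number(37))  # 2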


# ===== next file: M2M100 tokenizer tests (transformers test suite) =====
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ : Tuple = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )


# ===== next file: Dutch national flag sort =====
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in `colors` in a single pass,
    in place, using Dijkstra's three-way partitioning.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")


# ===== next file: lazy-import __init__ for GPT-NeoX-Japanese (transformers) =====
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 | """simple docstring"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)


# ===== next file: lazy-import __init__ for GroupViT (transformers) =====
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# ===== next file: breadth-first search and bidirectional BFS on a grid =====
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain breadth-first search over the module-level grid."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return a list of the walkable neighbour nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Run two BFS instances towards each other and join their paths."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # each search hunts the other search's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_lowerCamelCase : Optional[Any] = (0, 0)
_lowerCamelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_lowerCamelCase : Dict = time.time()
_lowerCamelCase : List[Any] = BreadthFirstSearch(init, goal)
_lowerCamelCase : Optional[Any] = bfs.search()
_lowerCamelCase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
_lowerCamelCase : str = time.time()
_lowerCamelCase : Dict = BidirectionalBreadthFirstSearch(init, goal)
_lowerCamelCase : Optional[Any] = bd_bfs.search()
_lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
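# Illustrative sketch (assumes the Node class defined above): retrace_path
# follows .parent links from the meeting node back to the start and then
# reverses, so paths come out start -> goal as (y, x) tuples.
#
#   a = Node(0, 0, 2, 2, None)   # pos == (0, 0)
#   b = Node(1, 0, 2, 2, a)      # pos == (0, 1)
#   c = Node(1, 1, 2, 2, b)      # pos == (1, 1)
#   BreadthFirstSearch((0, 0), (2, 2)).retrace_path(c)
#   # -> [(0, 0), (0, 1), (1, 1)]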
| 191 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
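# Minimal usage sketch for the pipeline above. This is an illustration, not
# part of the class; it assumes the OWL-ViT checkpoint, which supports the
# "zero-shot-object-detection" task:
#
#   from transformers import pipeline
#
#   detector = pipeline(
#       task="zero-shot-object-detection",
#       model="google/owlvit-base-patch32",
#   )
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]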
| 82 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key):
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val):
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
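# Usage sketch for the HashMap above:
#
#   hm = HashMap(initial_block_size=4)
#   hm["a"] = 1
#   hm["b"] = 2
#   hm["a"]      # -> 1
#   len(hm)      # -> 2
#   del hm["b"]
#   "b" in hm    # -> False (MutableMapping derives __contains__ from __getitem__)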
| 116 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
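# Note on the pattern above: _LazyModule defers the heavy submodule imports
# until first attribute access, so e.g.
#   from transformers.onnx import OnnxConfig
# only triggers the import of the `config` submodule at that point.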
| 358 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
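# Stand-alone sketch of the processor under test (assumes Pillow, numpy and
# torch are installed; the 18x18 size mirrors the tester default above):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import ViTImageProcessor
#
#   processor = ViTImageProcessor(size={"height": 18, "width": 18})
#   img = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
#   processor(img, return_tensors="pt").pixel_values.shape
#   # -> torch.Size([1, 3, 18, 18])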
| 103 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares via the normal equation."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX with the matches played as an exogenous variable."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regressor with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Helper: lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Compare each forecast with the actual value and take a majority vote."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
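# Worked example for data_safety_checker (pure function, safe to run alone):
#   data_safety_checker([0.45, 0.48, 0.9], 0.5)  # -> True
# Two of the three forecasts sit within 0.1 below the actual value; the
# overshoot 0.9 is the only "not safe" vote, so safe > not_safe.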
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
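# For contrast, a sequential odd-even transposition pass (illustrative sketch,
# not part of the parallel implementation above) makes the alternating
# even/odd phase structure explicit:
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr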
| 1 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    # superresolution variant (method name inferred for this excerpt; the
    # stage-2 components below are not referenced by name elsewhere in it)
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Any = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = inputs['''prompt''']
UpperCamelCase : Optional[Any] = inputs['''generator''']
UpperCamelCase : int = inputs['''num_inference_steps''']
UpperCamelCase : Union[str, Any] = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase : Tuple = inputs['''image''']
else:
UpperCamelCase : Dict = None
if "mask_image" in inputs:
UpperCamelCase : Optional[Any] = inputs['''mask_image''']
else:
UpperCamelCase : Optional[int] = None
if "original_image" in inputs:
UpperCamelCase : List[Any] = inputs['''original_image''']
else:
UpperCamelCase : str = None
UpperCamelCase , UpperCamelCase : Tuple = pipe.encode_prompt(__SCREAMING_SNAKE_CASE )
# inputs with prompt converted to embeddings
UpperCamelCase : Tuple = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : Dict = image
if mask_image is not None:
UpperCamelCase : Tuple = mask_image
if original_image is not None:
UpperCamelCase : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCamelCase : Dict = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = inputs['''generator''']
UpperCamelCase : List[str] = inputs['''num_inference_steps''']
UpperCamelCase : str = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase : int = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : Tuple = image
if mask_image is not None:
UpperCamelCase : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase : List[Any] = original_image
UpperCamelCase : Any = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : str = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : Dict = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
| 352 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of raw data into per-column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Normalize every column to [0, 1]; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Weighted procentual proximity: append a final score to every input row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
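# Worked example (two criteria: price with weight 0 = lower is better,
# comfort with weight 1 = higher is better):
#
#   procentual_proximity([[20, 60], [25, 90], [30, 75]], [0, 1])
#   # -> [[20, 60, 1.0], [25, 90, 1.5], [30, 75, 0.5]]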
| 315 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        # python one-liner segments; the mock must come last so that the model
        # files are already cached before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n"

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n"

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed: the files are cached, so flaky internet must not matter
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n"

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n"

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n"
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n"

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 187 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
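# Typical invocations routed through this entry point (sketch):
#   accelerate config     # interactive setup, writes a default config file
#   accelerate env        # print environment info for bug reports
#   accelerate launch train.py   # run a script under the chosen config
#   accelerate test       # sanity-check the current config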
| 187 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ = 16 ):
"""simple docstring"""
snake_case = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case = load_dataset('''glue''' ,'''mrpc''' )
def tokenize_function(UpperCamelCase_ ):
# max_length=None => use the model max length (it's actually the default)
snake_case = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=UpperCamelCase_ ,max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case = datasets.map(
UpperCamelCase_ ,batched=UpperCamelCase_ ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(UpperCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case = 16
elif accelerator.mixed_precision != "no":
snake_case = 8
else:
snake_case = None
return tokenizer.pad(
UpperCamelCase_ ,padding='''longest''' ,max_length=UpperCamelCase_ ,pad_to_multiple_of=UpperCamelCase_ ,return_tensors='''pt''' ,)
# Instantiate dataloaders.
snake_case = DataLoader(
tokenized_datasets['''train'''] ,shuffle=UpperCamelCase_ ,collate_fn=UpperCamelCase_ ,batch_size=UpperCamelCase_ )
snake_case = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=UpperCamelCase_ ,collate_fn=UpperCamelCase_ ,batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# New Code #
snake_case = int(args.gradient_accumulation_steps )
snake_case = int(args.local_sgd_steps )
# Initialize accelerator
snake_case = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=UpperCamelCase_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case = config['''lr''']
snake_case = int(config['''num_epochs'''] )
snake_case = int(config['''seed'''] )
snake_case = int(config['''batch_size'''] )
snake_case = evaluate.load('''glue''' ,'''mrpc''' )
set_seed(UpperCamelCase_ )
snake_case , snake_case = get_dataloaders(UpperCamelCase_ ,UpperCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case = model.to(accelerator.device )
# Instantiate optimizer
snake_case = AdamW(params=model.parameters() ,lr=UpperCamelCase_ )
# Instantiate scheduler
snake_case = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ ,num_warmup_steps=1_00 ,num_training_steps=(len(UpperCamelCase_ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case = accelerator.prepare(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
with LocalSGD(
accelerator=UpperCamelCase_ ,model=UpperCamelCase_ ,local_sgd_steps=UpperCamelCase_ ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCamelCase_ ):
snake_case = model(**UpperCamelCase_ )
snake_case = output.loss
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case = model(**UpperCamelCase_ )
snake_case = outputs.logits.argmax(dim=-1 )
snake_case , snake_case = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase_ ,references=UpperCamelCase_ ,)
snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,UpperCamelCase_ )
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' ,type=UpperCamelCase_ ,default=UpperCamelCase_ ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' ,)
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=UpperCamelCase_ ,default=1 ,help='''The number of minibatches to be ran before gradients are accumulated.''' ,)
parser.add_argument(
'''--local_sgd_steps''' ,type=UpperCamelCase_ ,default=8 ,help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
snake_case = parser.parse_args()
snake_case = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase_ ,UpperCamelCase_ )
if __name__ == "__main__":
main()
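# Minimal shape of the LocalSGD pattern used above (sketch; `accelerator`,
# `model`, `optimizer` and `train_dataloader` are assumed already prepared):
#
#   with LocalSGD(accelerator=accelerator, model=model,
#                 local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           with accelerator.accumulate(model):
#               loss = model(**batch).loss
#               accelerator.backward(loss)
#               optimizer.step()
#               optimizer.zero_grad()
#           local_sgd.step()  # parameters sync every `local_sgd_steps` steps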
| 213 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Unidirectional breadth-first search on the grid above."""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers, one from each end, that stop when they meet."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 213 | 1 |
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
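# Quick oscillation check (uses the functions above; the blinker has period 2):
#
#   step1 = new_generation(BLINKER)   # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#   new_generation(step1) == BLINKER  # -> True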
| 53 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 258 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
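
# A minimal usage sketch (illustrative; both helpers are re-exported above):
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)                                 # seed Python, NumPy and torch RNGs
#   batch = send_to_device(batch, "cuda:0")      # recursively move tensors to a device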
| 370 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load the base longformer model and wrap it in the Lightning module
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
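
# Example invocation (paths are illustrative, not taken from this script):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./converted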
| 66 | 0 |
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''')

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
| 67 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
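
# Example (illustrative): inverse_of_matrix([[2, 5], [1, 3]]) -> [[3.0, -5.0], [-1.0, 2.0]],
# since the determinant is 2 * 3 - 1 * 5 = 1.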
| 67 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(F'''{name}_pubkey.txt''') or os.path.exists(F'''{name}_privkey.txt'''):
        print("\nWARNING:")
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(F'''{name}_pubkey.txt''', "w") as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''')
    print(F'''Writing private key to file {name}_privkey.txt...''')
    with open(F'''{name}_privkey.txt''', "w") as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''')


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 319 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to the end."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
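
# Example (illustrative): bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]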
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
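
# Instantiating the deprecated class still works but emits a FutureWarning:
#   feature_extractor = OwlViTFeatureExtractor()   # prefer OwlViTImageProcessor()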
| 56 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: list, ltp_tokenizer, bert_tokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
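
# Example invocation (the arguments shown are just the script's own defaults):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt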
| 343 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}

payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            'emoji': True,
        },
    }
]

total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = 'No failed tests! 🤗'
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get('TEST_TYPE', '') != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        'type': 'context',
        'elements': [
            {
                'type': 'plain_text',
                'text': f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
| 359 |
'''simple docstring'''
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
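
# Example (illustrative): power(2, 10) -> 1024, power(-2, -3) -> -0.125 (i.e. 1 / (-2) ** 3)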
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 8 |
"""simple docstring"""
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text('Disk', font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_a = MarkupText(
            'Now watch as an input is passed through the model\nand how the memory is utilized and handled.', font_size=24, )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))

        self.play(FadeOut(step_a))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_a = MarkupText(
            'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.', font_size=24, )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=3))

        circ_kwargs = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(a), Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2)
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs['run_time'] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs), )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]), )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7), )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)
                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs), Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        # fade out the explanatory text and the arrow (choice of targets reconstructed here)
        self.play(
            FadeOut(step_a), FadeOut(a, run_time=0.5), )

        step_a = MarkupText('Inference on a model too large for GPU memory\nis successfully completed.', font_size=24)
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=3), MoveToTarget(input))
        self.wait()
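
# To render this scene with the manim CLI (command is illustrative):
#   manim -pql this_file.py Stage5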
| 315 | 0 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 2_56
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image: str) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_56, [0, 2_56], label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 2_56, [0, 2_56])

    def show_image(self) -> None:
        cv2.imshow("""Output-Image""", self.img)
        cv2.imshow("""Input-Image""", self.original_image)
        cv2.waitKey(50_00)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 359 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    '''
    Get image rotation: warp the image with the affine transform mapping pt1 onto pt2.
    '''
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list (the point-set pairings below are illustrative)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
| 89 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 6 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


if is_bsa_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ['''bs4'''])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, '''html.parser''')

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''')
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''')

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ''''''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F"""but is of type {type(html_strings)}.""")

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
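
# A minimal usage sketch (the HTML string is illustrative, not from this file):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   -> [["Hello world"]]
#   encoding["xpaths"]  -> [["/html/body/p"]]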
| 6 | 1 |
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
cur -= 1
return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 180 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = """sew-d"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.0_2,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.0_5,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> Dict:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self) -> List[str]:
        return functools.reduce(operator.mul, self.conv_stride, 1)
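
# A minimal usage sketch (the attribute values follow from the defaults above):
#   config = SEWDConfig()
#   config.num_feat_extract_layers   # -> 13 (len(conv_dim))
#   config.inputs_to_logits_ratio    # -> 320 (product of conv_stride)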
| 180 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # ViTMAE applies random masking, so fix the noise to make outputs comparable
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1E-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
snake_case_ = model(**UpperCAmelCase_ , noise=UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) ->Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.constant(UpperCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ = tf_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCAmelCase_ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCAmelCase_ , UpperCAmelCase_ ),)
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCAmelCase_ , """_keras_serializable""" , UpperCAmelCase_ )
}
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.convert_to_tensor(UpperCAmelCase_ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ = main_layer_class(UpperCAmelCase_ )
snake_case_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ = tf.keras.Model(UpperCAmelCase_ , outputs=main_layer(UpperCAmelCase_ ) )
snake_case_ = model(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """keras_model.h5""" )
model.save(UpperCAmelCase_ )
snake_case_ = tf.keras.models.load_model(
UpperCAmelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCAmelCase_ , tf.keras.Model )
snake_case_ = model(UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ = outputs.last_hidden_state.numpy()
snake_case_ = 0
else:
snake_case_ = outputs.logits.numpy()
snake_case_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ , saved_model=UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ = after_outputs["""last_hidden_state"""].numpy()
snake_case_ = 0
else:
snake_case_ = after_outputs["""logits"""].numpy()
snake_case_ = 0
snake_case_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase_ , 1E-5 )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
snake_case_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCAmelCase_ )
snake_case_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ = model_class.from_config(model.config )
snake_case_ = new_model(UpperCAmelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ = new_model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCAmelCase ( self : Optional[Any] ) ->str:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
snake_case_ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> Union[str, Any]:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ = ViTMAEConfig()
snake_case_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ = model(**UpperCAmelCase_ , noise=UpperCAmelCase_ )
# verify the logits
snake_case_ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
| 347 |
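# A minimal worked example of the masking arithmetic used by the ViTMAE tester
# above: the expected encoder sequence length is (num_patches + 1) * (1 - mask_ratio),
# rounded up (the +1 is the [CLS] token). The sizes below (224px images, 16px
# patches, 0.75 mask ratio) are the usual ViTMAE defaults, assumed here purely
# for illustration.
import math

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
assert (num_patches, seq_length) == (196, 50)  # ceil(0.25 * 197) = 50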
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Collect each column of source_data into its own list."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize every column to [0, 1]; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up the per-column scores of each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a combined, weighted score to every row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 120 | 0 |
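# A short usage sketch of procentual_proximity above (function and variable
# names restored from the obfuscated source). A weight of 0 means "lower is
# better", 1 means "higher is better"; the combined score is appended to each
# row in place.
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]  # price, mileage, year
print(procentual_proximity(vehicles, [0, 0, 1]))  # minimize price/mileage, maximize year
# -> the first row gains the top combined score of 2.0 (cheapest, decent year)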
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''OwlViTFeatureExtractor''']
__snake_case = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 362 |
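# The OwlViT __init__ above defers its heavy imports through transformers'
# _LazyModule. A minimal sketch of the same idea in plain Python via PEP 562
# module-level __getattr__; this illustrates the pattern and is not the actual
# _LazyModule implementation.
import importlib

_import_structure = {"math": ["sqrt"]}  # submodule -> exported names
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)  # the submodule is imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")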
'''simple docstring'''
import socket
def main() -> None:
    """Connect to the server, greet it, and save the streamed file locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main() | 219 | 0 |
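# The client above assumes a server that accepts one connection, reads the
# greeting, then streams a file and closes. A minimal counterpart sketch; the
# file name and port are assumptions matching the client, not part of the
# original source.
import socket

def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing the socket signals EOF, ending the client's recv loop
    server.close()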
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _lowerCamelCase( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)])
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = GenerationConfig(
do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase, config_name=lowerCamelCase)
_lowercase : List[str] = GenerationConfig.from_pretrained(lowerCamelCase, config_name=lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample, lowerCamelCase)
self.assertEqual(loaded_config.temperature, 0.7)
self.assertEqual(loaded_config.length_penalty, 1.0)
self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k, 50)
self.assertEqual(loaded_config.max_length, 20)
self.assertEqual(loaded_config.max_time, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = AutoConfig.from_pretrained('gpt2')
_lowercase : List[str] = GenerationConfig.from_model_config(lowerCamelCase)
_lowercase : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase, lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = GenerationConfig()
_lowercase : List[Any] = {
'max_new_tokens': 10_24,
'foo': 'bar',
}
_lowercase : Any = copy.deepcopy(lowerCamelCase)
_lowercase : List[Any] = generation_config.update(**lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase, lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens, 10_24)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase, {'foo': 'bar'})
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = GenerationConfig()
_lowercase : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
generation_config.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo, 'bar')
_lowercase : int = GenerationConfig.from_model_config(lowerCamelCase)
assert not hasattr(lowerCamelCase, 'foo') # no new kwargs should be initialized if from config
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature, 1.0)
self.assertEqual(default_config.do_sample, lowerCamelCase)
self.assertEqual(default_config.num_beams, 1)
_lowercase : Optional[Any] = GenerationConfig(
do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
self.assertEqual(config.temperature, 0.7)
self.assertEqual(config.do_sample, lowerCamelCase)
self.assertEqual(config.num_beams, 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase, temperature=1.0)
self.assertEqual(loaded_config.temperature, 1.0)
self.assertEqual(loaded_config.do_sample, lowerCamelCase)
self.assertEqual(loaded_config.num_beams, 1) # default value
@is_staging_test
class _lowerCamelCase( unittest.TestCase ):
@classmethod
def UpperCamelCase ( cls) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase)
@classmethod
def UpperCamelCase ( cls) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='test-generation-config')
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-generation-config-org')
except HTTPError:
pass
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = GenerationConfig(
do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, )
config.push_to_hub('test-generation-config', use_auth_token=self._token)
_lowercase : Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
# Reset repo
delete_repo(token=self._token, repo_id='test-generation-config')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase, repo_id='test-generation-config', push_to_hub=lowerCamelCase, use_auth_token=self._token)
_lowercase : Tuple = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = GenerationConfig(
do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, )
config.push_to_hub('valid_org/test-generation-config-org', use_auth_token=self._token)
_lowercase : Any = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-generation-config-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase, repo_id='valid_org/test-generation-config-org', push_to_hub=lowerCamelCase, use_auth_token=self._token)
_lowercase : Optional[Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
| 21 |
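# A minimal round-trip of the save/load behaviour exercised by the tests above:
# explicitly set fields persist, everything else falls back to library defaults
# on reload (top_k defaults to 50 in current transformers releases).
import tempfile
from transformers import GenerationConfig

with tempfile.TemporaryDirectory() as tmp_dir:
    GenerationConfig(do_sample=True, temperature=0.7).save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.temperature == 0.7 and loaded.top_k == 50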
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # inserting at the head from a descending sort leaves the list ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join(str(node) for node in self)


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 21 | 1 |
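# A quick sanity check of merge_lists above: SortedLinkedList stores its
# elements in ascending order, so merging yields the ascending union of both
# inputs with duplicates kept.
odd = SortedLinkedList((3, 9, -11, 0, 7, 5, 1, -1))
even = SortedLinkedList((4, 6, 2, 0, 8, 10, 3, -2))
merged = merge_lists(odd, even)
assert len(merged) == 16
assert str(merged) == "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10"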
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
_snake_case = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_snake_case = """resnet101"""
if "dc5" in model_name:
_snake_case = True
_snake_case = """panoptic""" in model_name
if is_panoptic:
_snake_case = 250
else:
_snake_case = 91
_snake_case = """huggingface/label-files"""
_snake_case = """coco-detection-id2label.json"""
_snake_case = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
_snake_case = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# load image processor
_snake_case = """coco_panoptic""" if is_panoptic else """coco_detection"""
_snake_case = ConditionalDetrImageProcessor(format=_SCREAMING_SNAKE_CASE )
# prepare image
_snake_case = prepare_img()
_snake_case = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
_snake_case = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_snake_case = torch.hub.load("""DeppMeng/ConditionalDETR""" , _SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ).eval()
_snake_case = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_snake_case = """conditional_detr.""" + src
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = rename_backbone_keys(_SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(_SCREAMING_SNAKE_CASE , is_panoptic=_SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_snake_case = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_snake_case = state_dict.pop(_SCREAMING_SNAKE_CASE )
_snake_case = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_snake_case = state_dict.pop(_SCREAMING_SNAKE_CASE )
_snake_case = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_snake_case = state_dict.pop(_SCREAMING_SNAKE_CASE )
_snake_case = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_snake_case = state_dict.pop(_SCREAMING_SNAKE_CASE )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = ConditionalDetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else ConditionalDetrForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
model.push_to_hub(repo_id=_SCREAMING_SNAKE_CASE , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
_snake_case = conditional_detr(_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 270 |
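# The read_in_q_k_v helper above slices the fused (3*d, d) in_proj weight of
# PyTorch's nn.MultiheadAttention into separate query/key/value projections, in
# that order. A standalone torch sketch of the split, with d = 256 as in the
# conversion script:
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)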
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase (self ) -> Dict:
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["""controlnet"""] = controlnet_params
_snake_case = """bird"""
_snake_case = jax.device_count()
_snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
_snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
_snake_case = pipe.prepare_image_inputs([canny_image] * num_samples )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(UpperCAmelCase , jax.device_count() )
_snake_case = replicate(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase (self ) -> Optional[int]:
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["""controlnet"""] = controlnet_params
_snake_case = """Chef in the kitchen"""
_snake_case = jax.device_count()
_snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
_snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
_snake_case = pipe.prepare_image_inputs([pose_image] * num_samples )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(UpperCAmelCase , jax.device_count() )
_snake_case = replicate(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 270 | 1 |
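# The Flax tests above fan a batch out across devices with flax's replicate and
# shard helpers. A minimal sketch of what those do to array shapes, assuming
# the batch size (8 here) is divisible by the local device count:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.ones((4,))}
replicated = replicate(params)  # params["w"]: (4,) -> (n, 4), one copy per device
batch = jnp.zeros((8, 3))
sharded = shard(batch)          # (8, 3) -> (n, 8 // n, 3), one slice per device
assert replicated["w"].shape == (n, 4) and sharded.shape == (n, 8 // n, 3)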
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a ( unittest.TestCase ):
def A_ ( self : Tuple ):
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
snake_case_ = {'''unk_token''': '''<unk>'''}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
snake_case_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
snake_case_ = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowercase_ , lowercase_ )
def A_ ( self : Tuple , **lowercase_ : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase_ )
def A_ ( self : int , **lowercase_ : int ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase_ )
def A_ ( self : Dict , **lowercase_ : Optional[Any] ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def A_ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def A_ ( self : Tuple ):
        snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case_ = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Dict ):
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ = self.get_image_processor(do_normalize=lowercase_ )
snake_case_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def A_ ( self : str ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowercase_ , return_tensors='''np''' )
snake_case_ = processor(images=lowercase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''lower newer'''
snake_case_ = processor(text=lowercase_ , return_tensors='''np''' )
snake_case_ = tokenizer(lowercase_ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def A_ ( self : Any ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''lower newer'''
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : int ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = ['''cat''', '''nasa badge''']
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : str ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = [['''cat''', '''nasa badge'''], ['''person''']]
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
snake_case_ = len(lowercase_ )
snake_case_ = max([len(lowercase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : Union[str, Any] ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = ['''cat''', '''nasa badge''']
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
snake_case_ = inputs['''input_ids''']
snake_case_ = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def A_ ( self : List[str] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(images=lowercase_ , query_images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : Any ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowercase_ )
snake_case_ = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
| 56 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights into our RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
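# A toy run of the key-rewrite rule used by the converter above, on a fake
# state dict (the key names here are invented for illustration):
toy_sd = {
    "roberta.encoder.layer.0.attention.self.query.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # gets dropped
    "lm_head.bias": 3,
}
converted = {}
for k, v in toy_sd.items():
    if k.startswith("roberta."):
        k = "roberta_prelayernorm." + k[len("roberta.") :]
    if k.endswith(".self.LayerNorm.weight") or k.endswith(".self.LayerNorm.bias"):
        continue
    converted[k] = v
assert "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight" in converted
assert len(converted) == 2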
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the multiplicative inverse of a 2x2 or 3x3 matrix."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 352 |
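# A quick usage check of inverse_of_matrix above: for a 2x2 matrix the result
# is (1/det) * [[d, -b], [-c, a]], so A @ A_inv should be the identity.
import numpy as np

a = [[4.0, 7.0], [2.0, 6.0]]   # det = 4*6 - 2*7 = 10
a_inv = inverse_of_matrix(a)   # [[0.6, -0.7], [-0.2, 0.4]]
assert np.allclose(np.array(a) @ np.array(a_inv), np.eye(2))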
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
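# Example invocation (the script filename is assumed from the standard transformers
# conversion-script naming; all paths below are placeholders):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab.jsonl \
#       --pytorch_dump_folder_path /path/to/converted_model \
#       --model_size base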
| 310 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_attention_heads=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_time_features=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings(self):
"""simple docstring"""
pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowercase_ ( _snake_case="train-batch.pt" ):
SCREAMING_SNAKE_CASE__ : List[str] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=_snake_case ,repo_type="""dataset""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.load(_snake_case ,map_location=_snake_case )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
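# To run this module locally (the integration tests above are gated behind RUN_SLOW):
#
#   RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py
#
# (test path assumed from the standard transformers repository layout)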
| 25 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level of the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
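# Minimal usage sketch (assuming this module lives at `datasets.utils.logging`):
#
#   from datasets.utils import logging as ds_logging
#
#   ds_logging.set_verbosity_info()           # raise library verbosity to INFO
#   logger = ds_logging.get_logger(__name__)  # child of the library root logger
#   logger.info("now visible")
#
#   ds_logging.disable_progress_bar()         # the `tqdm` wrapper above becomes a no-op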
| 25 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowercase__ ( snake_case_ :str = "dhaka" , snake_case_ :int = 5 ):
__UpperCAmelCase = min(A__ , 50 ) # Prevent abuse!
__UpperCAmelCase = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
__UpperCAmelCase = requests.get('''https://www.google.com/search''' , params=A__ , headers=A__ )
__UpperCAmelCase = BeautifulSoup(html.text , '''html.parser''' )
__UpperCAmelCase = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
__UpperCAmelCase = json.dumps(A__ )
__UpperCAmelCase = json.loads(A__ )
__UpperCAmelCase = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , A__ , )
if not matched_google_image_data:
return 0
__UpperCAmelCase = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(A__ ) , )
__UpperCAmelCase = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , A__ , )
for index, fixed_full_res_image in enumerate(A__ ):
if index >= max_images:
return index
__UpperCAmelCase = bytes(A__ , '''ascii''' ).decode(
'''unicode-escape''' )
__UpperCAmelCase = bytes(A__ , '''ascii''' ).decode(
'''unicode-escape''' )
__UpperCAmelCase = urllib.request.build_opener()
__UpperCAmelCase = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(A__ )
__UpperCAmelCase = F'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(A__ ):
os.makedirs(A__ )
urllib.request.urlretrieve( # noqa: S310
A__ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
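# Example (requires network access; Google changes its result markup regularly,
# in which case the regexes above stop matching and the function returns 0):
#
#   count = download_images_from_google_query("sunflowers", max_images=3)
#   # downloads up to 3 full-resolution images into ./query_sunflowers/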
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 357 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any `ModelWithLMHead`.
    """
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {
                    "score": v,
                    "token": p,
                    "token_str": self.tokenizer.decode([p]),
                    "sequence": sequence,
                }
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
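# Minimal usage sketch (the checkpoint name below is just an example):
#
#   from transformers import pipeline
#
#   fill_mask = pipeline("fill-mask", model="distilroberta-base")
#   fill_mask("The capital of France is <mask>.", top_k=3)
#   # -> list of {"score", "token", "token_str", "sequence"} dicts, best first
#
#   # `targets` restricts scoring to specific candidate tokens:
#   fill_mask("The capital of France is <mask>.", targets=[" Paris", " London"])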
| 86 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
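# To run just these tests locally (torch + vision extras required; path assumed
# from the standard transformers test layout):
#
#   python -m pytest tests/models/levit/test_image_processing_levit.py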
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a `DecisionTransformerModel`.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
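# Minimal usage sketch:
#
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#   # `attribute_map` lets GPT-2-style attribute names resolve to the fields above:
#   assert config.num_attention_heads == config.n_head
#   assert config.max_position_embeddings == config.n_positions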
| 283 | 0 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using circle sort."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
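# Example:
#
#   >>> circle_sort([5, 3, 1, 4, 2])
#   [1, 2, 3, 4, 5]
#   >>> circle_sort([])
#   []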
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 326 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverse of the denoising diffusion implicit models (DDIM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity; kept for interchangeability with schedulers that scale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self) -> int:
        return self.config.num_train_timesteps
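# Minimal usage sketch: the inverse scheduler integrates the deterministic DDIM
# ODE from image to noise, the usual first step of inversion-based editing.
# `unet` and `latents` below are placeholders supplied by the caller.
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample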
| 326 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
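# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats scaled by `scale`,
# drawn from the shared module-level `global_rng`.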
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
                0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
                0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
                -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 41 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given its coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at x using Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_A : Any =(0.0, 0.0, 5.0, 9.3, 7.0)
_A : Optional[Any] =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
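
    # Horner's rule rewrites c0 + c1*x + ... + cn*x**n as
    # c0 + x*(c1 + x*(c2 + ...)), so both strategies must agree up to
    # floating-point rounding while Horner needs only n multiplications.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6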
| 41 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 353 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
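

# The DummyObject metaclass pattern above lets a library import placeholder
# classes when optional backends are missing: any attempt to use them funnels
# into requires_backends, which raises an informative error naming the missing
# dependencies. A hedged, simplified sketch of the idea (not the actual
# DummyObject implementation):
#
#     class DummyMeta(type):
#         def __call__(cls, *args, **kwargs):
#             raise ImportError(f"{cls.__name__} requires: torch, scipy")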
| 344 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 194 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    """Build a Gabor kernel; an even ksize is incremented to the next odd number."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
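

# A small sanity sketch (illustrative parameter values): an odd kernel size is
# kept as-is, the result is a square array, and the response peaks at the
# center, where the Gaussian envelope and the cosine carrier are both maximal.
_demo_kernel = gabor_filter_kernel(9, sigma=8, theta=0, lambd=10, psi=0, gamma=1)
assert _demo_kernel.shape == (9, 9)
assert _demo_kernel[4, 4] == _demo_kernel.max()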
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 194 | 1 |
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
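
# Sanity check from the Project Euler 29 statement: for 2 <= a <= 5 and
# 2 <= b <= 5 there are exactly 15 distinct terms.
assert solution(5) == 15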
| 22 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
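

# A short usage sketch (illustrative arrays; requires scikit-learn and scipy):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}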
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
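
    # A small usage sketch on a strictly diagonally dominant system
    # (illustrative numbers; any strictly diagonally dominant matrix works):
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))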
| 223 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = parent
A : List[Any] = batch_size
A : int = seq_length
A : Tuple = is_training
A : int = use_input_mask
A : Dict = use_token_type_ids
A : Optional[int] = use_labels
A : int = vocab_size
A : Tuple = hidden_size
A : Union[str, Any] = num_hidden_layers
A : int = num_attention_heads
A : List[Any] = intermediate_size
A : str = hidden_act
A : str = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : Dict = max_position_embeddings
A : int = type_vocab_size
A : int = type_sequence_label_size
A : Optional[Any] = initializer_range
A : Tuple = num_labels
A : Any = num_choices
A : str = scope
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Any = random_attention_mask([self.batch_size, self.seq_length] )
A : Tuple = None
if self.use_token_type_ids:
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Dict = None
A : Union[str, Any] = None
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Any = ids_tensor([self.batch_size] , self.num_choices )
A : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , use_stable_embedding=_A , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : int = OpenLlamaModel(config=_A )
model.to(_A )
model.eval()
A : List[Any] = model(_A , attention_mask=_A )
A : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = True
A : Any = OpenLlamaModel(_A )
model.to(_A )
model.eval()
A : Dict = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
A : List[str] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
A : List[str] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = OpenLlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
A : int = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : str = True
A : Tuple = True
A : List[str] = OpenLlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
A : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
A : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
A : Any = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
A : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : int = self.prepare_config_and_inputs()
(
A
) : int = config_and_inputs
A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A : Any = type
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A : List[Any] = 3
A : List[str] = input_dict['input_ids']
A : List[str] = input_ids.ne(1 ).to(_A )
A : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A : str = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
A : Optional[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs_for_common()
A : List[str] = 3
A : List[Any] = 'single_label_classification'
A : int = input_dict['input_ids']
A : str = input_ids.ne(1 ).to(_A )
A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A : str = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
A : Union[str, Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A : Optional[int] = 3
A : List[Any] = 'multi_label_classification'
A : Union[str, Any] = input_dict['input_ids']
A : List[str] = input_ids.ne(1 ).to(_A )
A : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A : Union[str, Any] = OpenLlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
A : Union[str, Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ids_tensor([1, 10] , config.vocab_size )
A : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A : List[Any] = OpenLlamaModel(_A )
original_model.to(_A )
original_model.eval()
A : str = original_model(_A ).last_hidden_state
A : List[Any] = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A : Optional[Any] = {'type': scaling_type, 'factor': 10.0}
A : Dict = OpenLlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
A : Dict = scaled_model(_A ).last_hidden_state
A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
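

# Background for the scaling test above: "linear" RoPE scaling divides
# position ids by the factor before computing rotary angles, stretching the
# usable context window, while "dynamic" scaling only changes the embeddings
# once inputs exceed the trained maximum length. A hedged numpy sketch of the
# linear variant (illustrative, not the modeling code):
#
#     import numpy as np
#
#     def linear_scaled_rope_angles(position, dim=8, base=10000.0, factor=10.0):
#         inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
#         return (position / factor) * inv_freq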
| 350 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
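

# Typical entry point for the builder above is the packaged "csv" loader, e.g.:
#
#     from datasets import load_dataset
#     dataset = load_dataset("csv", data_files={"train": "train.csv"})
#
# Any CsvConfig field (sep, skiprows, column_names, ...) can be passed as a
# keyword argument to load_dataset and ends up in pd_read_csv_kwargs.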
| 311 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
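# Note (not original code): pytest resolves each test parameter above to the
# fixture of the same name, and `image_file` is expected to come from a
# shared conftest.py. The Features passed to Csv() drive _cast_table(), e.g.:
#   Csv(features=Features({"label": ClassLabel(names=["good", "bad"])}))
# casts the string column to integer class ids at read time. A typical run
# would be something like (path is hypothetical):
#   pytest tests/packaged_modules/test_csv.py -q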
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
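# Hypothetical one-off usage of the helpers above; item id 8863 is the sample
# id used in the Hacker News API documentation, and "title"/"url" are among
# the documented /v0/item fields (this performs a live HTTP request):
story = get_hackernews_story("8863")
print(story["title"], story.get("url"))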
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
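# Hypothetical programmatic call, bypassing argparse; the input must be a
# fairseq-style XLM dump, i.e. a pickled dict with "model", "params" and
# "dico_word2id" keys, which is exactly what torch.load() above unpacks:
# convert_xlm_checkpoint_to_pytorch("./mlm_en_2048.pth", "./xlm-converted")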
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative shortcut if deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network output approaches `expected` percent."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
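# Sanity check (appended illustration, not part of the original script): with
# deriv=True the function applies s * (1 - s) to an already-squashed value,
# the standard shortcut for d/dx sigmoid(x) = s(x) * (1 - s(x)):
s = sigmoid_function(0.0)             # 0.5
ds = sigmoid_function(s, deriv=True)  # 0.5 * (1 - 0.5) = 0.25
assert abs(s - 0.5) < 1e-9 and abs(ds - 0.25) < 1e-9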
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
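# Quick illustration (assumes the class above corresponds to transformers'
# UniSpeechConfig): the property multiplies the conv strides together, so
# with the default conv_stride=(5, 2, 2, 2, 2, 2, 2) one output frame spans
# 5 * 2**6 = 320 raw samples, i.e. 20 ms of 16 kHz audio.
config = UniSpeechConfig()
assert config.inputs_to_logits_ratio == 320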
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
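# Minimal sketch (assumes this corresponds to transformers'
# EfficientFormerConfig): the defaults describe the EfficientFormer-L1
# layout; any field can be overridden at construction time.
config = EfficientFormerConfig(image_size=192)
print(config.depths, config.hidden_sizes)  # [3, 2, 6, 4] [48, 96, 224, 448]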
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """Wav2Vec2FeatureExtractor"""
_snake_case = """AutoTokenizer"""
def __init__( self , A , A ) -> List[str]:
super().__init__(A , A )
snake_case : Union[str, Any] = self.feature_extractor
snake_case : Optional[Any] = False
@classmethod
def UpperCAmelCase ( cls , A , **A ) -> Tuple:
try:
return super().from_pretrained(A , **A )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , A , )
snake_case : str = WavaVecaFeatureExtractor.from_pretrained(A , **A )
snake_case : Optional[int] = WavaVecaCTCTokenizer.from_pretrained(A , **A )
return cls(feature_extractor=A , tokenizer=A )
def __call__( self , *A , **A ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A , **A )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
snake_case : int = kwargs.pop("""raw_speech""" )
else:
snake_case : str = kwargs.pop("""audio""" , A )
snake_case : str = kwargs.pop("""sampling_rate""" , A )
snake_case : Dict = kwargs.pop("""text""" , A )
if len(A ) > 0:
snake_case : List[str] = args[0]
snake_case : Any = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case : str = self.feature_extractor(A , *A , sampling_rate=A , **A )
if text is not None:
snake_case : str = self.tokenizer(A , **A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case : List[Any] = encodings["""input_ids"""]
return inputs
def UpperCAmelCase ( self , *A , **A ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A , **A )
snake_case : Any = kwargs.pop("""input_features""" , A )
snake_case : Optional[Any] = kwargs.pop("""labels""" , A )
if len(A ) > 0:
snake_case : Optional[Any] = args[0]
snake_case : str = args[1:]
if input_features is not None:
snake_case : Dict = self.feature_extractor.pad(A , *A , **A )
if labels is not None:
snake_case : List[str] = self.tokenizer.pad(A , **A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
snake_case : Union[str, Any] = labels["""input_ids"""]
return input_features
def UpperCAmelCase ( self , *A , **A ) -> Tuple:
return self.tokenizer.batch_decode(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> Optional[int]:
return self.tokenizer.decode(*A , **A )
@contextmanager
def UpperCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
snake_case : Union[str, Any] = True
snake_case : Union[str, Any] = self.tokenizer
yield
snake_case : Any = self.feature_extractor
snake_case : Union[str, Any] = False
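# A minimal usage sketch (assumes the class above is transformers'
# Wav2Vec2Processor; the model name is just a common public checkpoint).
# This mirrors the recommended replacement for the deprecated
# as_target_processor() context manager shown above:
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # 1 s of silence as a stand-in
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD", return_tensors="pt")
inputs["labels"] = labels["input_ids"]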
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
lowerCamelCase : Tuple = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> List[str]:
snake_case : Union[str, Any] = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=A , required=A , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=A , required=A , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=A , required=A , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=A , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=A , default=A , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=A )
def __init__( self , A , A , A , A , A , *A , ) -> List[Any]:
snake_case : Any = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f"""Loading model {model_type}""" )
snake_case : int = model_type
snake_case : Any = tf_checkpoint
snake_case : int = pytorch_dump_output
snake_case : List[str] = config
snake_case : Tuple = finetuning_task_name
def UpperCAmelCase ( self ) -> int:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
if "ckpt" in self._tf_checkpoint.lower():
snake_case : List[Any] = self._tf_checkpoint
snake_case : Tuple = """"""
else:
snake_case : Tuple = self._tf_checkpoint
snake_case : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
A , self._config , self._pytorch_dump_output , A )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__: Optional[int] = logging.get_logger(__name__)
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__magic_name__ : str = [144, 192, 240]
__magic_name__ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__magic_name__ : List[str] = [96, 120, 144]
__magic_name__ : Dict = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__magic_name__ : List[Any] = [64, 80, 96]
__magic_name__ : Any = [16, 16, 24, 48, 64, 80, 320]
__magic_name__ : List[str] = 0.05
__magic_name__ : Dict = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
__magic_name__ : Optional[Any] = 512
__magic_name__ : List[Any] = 16
__magic_name__ : List[str] = 21
__magic_name__ : int = """pascal-voc-id2label.json"""
else:
__magic_name__ : Optional[Any] = 1000
__magic_name__ : Optional[Any] = """imagenet-1k-id2label.json"""
__magic_name__ : Tuple = """huggingface/label-files"""
__magic_name__ : str = json.load(open(hf_hub_download(_A, _A, repo_type="""dataset""" ), """r""" ) )
__magic_name__ : Optional[int] = {int(_A ): v for k, v in idalabel.items()}
__magic_name__ : Any = idalabel
__magic_name__ : Dict = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( _A, _A=False ):
"""simple docstring"""
for i in range(1, 6 ):
if f'layer_{i}.' in name:
__magic_name__ : Optional[int] = name.replace(f'layer_{i}.', f'encoder.layer.{i - 1}.' )
if "conv_1." in name:
__magic_name__ : Optional[int] = name.replace("""conv_1.""", """conv_stem.""" )
if ".block." in name:
__magic_name__ : Optional[int] = name.replace(""".block.""", """.""" )
if "exp_1x1" in name:
__magic_name__ : int = name.replace("""exp_1x1""", """expand_1x1""" )
if "red_1x1" in name:
__magic_name__ : Any = name.replace("""red_1x1""", """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
__magic_name__ : Optional[Any] = name.replace(""".local_rep.conv_3x3.""", """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
__magic_name__ : Union[str, Any] = name.replace(""".local_rep.conv_1x1.""", """.conv_1x1.""" )
if ".norm." in name:
__magic_name__ : List[str] = name.replace(""".norm.""", """.normalization.""" )
if ".conv." in name:
__magic_name__ : List[Any] = name.replace(""".conv.""", """.convolution.""" )
if ".conv_proj." in name:
__magic_name__ : str = name.replace(""".conv_proj.""", """.conv_projection.""" )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'.{i}.{j}.' in name:
__magic_name__ : List[str] = name.replace(f'.{i}.{j}.', f'.{i}.layer.{j}.' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'.{i}.{j}.' in name:
__magic_name__ : Tuple = name.replace(f'.{i}.{j}.', f'.{i}.' )
if "expand_1x1" in name:
__magic_name__ : Dict = name.replace("""expand_1x1""", """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
__magic_name__ : Union[str, Any] = name.replace("""conv_3x3""", """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
__magic_name__ : Union[str, Any] = name.replace("""reduce_1x1""", """downsampling_layer.reduce_1x1""" )
for i in range(2, 5 ):
if f'.global_rep.{i}.weight' in name:
__magic_name__ : Union[str, Any] = name.replace(f'.global_rep.{i}.weight', """.layernorm.weight""" )
if f'.global_rep.{i}.bias' in name:
__magic_name__ : Any = name.replace(f'.global_rep.{i}.bias', """.layernorm.bias""" )
if ".global_rep." in name:
__magic_name__ : int = name.replace(""".global_rep.""", """.transformer.""" )
if ".pre_norm_mha.0." in name:
__magic_name__ : Union[str, Any] = name.replace(""".pre_norm_mha.0.""", """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
__magic_name__ : List[str] = name.replace(""".pre_norm_mha.1.out_proj.""", """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
__magic_name__ : Dict = name.replace(""".pre_norm_ffn.0.""", """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
__magic_name__ : Union[str, Any] = name.replace(""".pre_norm_ffn.1.""", """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
__magic_name__ : Dict = name.replace(""".pre_norm_ffn.4.""", """.output.dense.""" )
if ".transformer." in name:
__magic_name__ : List[Any] = name.replace(""".transformer.""", """.transformer.layer.""" )
if ".aspp_layer." in name:
__magic_name__ : str = name.replace(""".aspp_layer.""", """.""" )
if ".aspp_pool." in name:
__magic_name__ : Any = name.replace(""".aspp_pool.""", """.""" )
if "seg_head." in name:
__magic_name__ : List[Any] = name.replace("""seg_head.""", """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
__magic_name__ : Any = name.replace("""segmentation_head.classifier.classifier.""", """segmentation_head.classifier.""" )
if "classifier.fc." in name:
__magic_name__ : Any = name.replace("""classifier.fc.""", """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
__magic_name__ : Optional[int] = """mobilevit.""" + name
return name
def UpperCamelCase ( _A, _A, _A=False ):
"""simple docstring"""
if base_model:
__magic_name__ : str = """"""
else:
__magic_name__ : List[str] = """mobilevit."""
for key in orig_state_dict.copy().keys():
__magic_name__ : str = orig_state_dict.pop(_A )
if key[:8] == "encoder.":
__magic_name__ : Dict = key[8:]
if "qkv" in key:
__magic_name__ : List[str] = key.split(""".""" )
__magic_name__ : str = int(key_split[0][6:] ) - 1
__magic_name__ : Any = int(key_split[3] )
__magic_name__ : int = model.get_submodule(f'{model_prefix}encoder.layer.{layer_num}' )
__magic_name__ : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__magic_name__ : str = (
f'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
__magic_name__ : Tuple = val[:dim, :]
__magic_name__ : Union[str, Any] = val[dim : dim * 2, :]
__magic_name__ : Any = val[-dim:, :]
else:
__magic_name__ : Tuple = val[:dim]
__magic_name__ : str = val[dim : dim * 2]
__magic_name__ : List[str] = val[-dim:]
else:
__magic_name__ : Any = val
return orig_state_dict
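# Illustration of the qkv split performed above (standalone toy numbers, not
# part of the conversion script): a fused attention projection stores Q, K
# and V stacked row-wise in one (3*dim, dim) matrix, so slicing off dim rows
# at a time recovers the three per-head projections:
import torch as _torch

_dim = 4
_qkv = _torch.arange(3 * _dim * _dim, dtype=_torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : 2 * _dim, :], _qkv[-_dim:, :]
assert _torch.equal(_torch.cat([_q, _k, _v]), _qkv)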
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : Optional[int] = Image.open(requests.get(_A, stream=_A ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _A, _A, _A, _A=False ):
"""simple docstring"""
__magic_name__ : Tuple = get_mobilevit_config(_A )
# load original state_dict
__magic_name__ : Optional[int] = torch.load(_A, map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
__magic_name__ : Tuple = MobileViTForSemanticSegmentation(_A ).eval()
else:
__magic_name__ : Dict = MobileViTForImageClassification(_A ).eval()
__magic_name__ : str = convert_state_dict(_A, _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by MobileViTImageProcessor
__magic_name__ : Any = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__magic_name__ : Optional[int] = image_processor(images=prepare_img(), return_tensors="""pt""" )
__magic_name__ : Union[str, Any] = model(**_A )
__magic_name__ : str = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__magic_name__ : Dict = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__magic_name__ : Optional[int] = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__magic_name__ : int = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3], _A, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__magic_name__ : Any = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__magic_name__ : int = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__magic_name__ : Any = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3], _A, atol=1e-4 )
Path(_A ).mkdir(exist_ok=_A )
print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_A )
if push_to_hub:
__magic_name__ : Dict = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
__magic_name__ : Optional[Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(_A, organization="""apple""" )
model.push_to_hub(_A, organization="""apple""" )
if __name__ == "__main__":
__magic_name__: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__magic_name__: int = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = FocalNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = FocalNetConfig
lowercase__ : Dict = False
def __magic_name__ ( self ) -> int:
__magic_name__ : Dict = FocalNetModelTester(self )
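# Aside on the shape checks above (worked numbers, not original test code):
# the tester's defaults use image_size=32 and patch_size=2, so the
# first-stage sequence length is (32 // 2) * (32 // 2) = 256 patches; each
# later stage divides the patch count by 4 while doubling the embedding dim,
# matching the expected_seq_len / expected_dim formulas in the model test.
image_size, patch_size = 32, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches == 256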
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__magic_name__: int = logging.getLogger(__name__)
class snake_case__ ( lowerCAmelCase_ ):
lowercase__ : Optional[int] = """summarization"""
lowercase__ : List[str] = ["""loss"""]
lowercase__ : List[Any] = ROUGE_KEYS
lowercase__ : Tuple = """rouge2"""
def __init__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
if hparams.sortish_sampler and hparams.gpus > 1:
__magic_name__ : List[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , mode=self.mode , **__SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__magic_name__ : Optional[int] = Path(self.output_dir ) / """metrics.json"""
__magic_name__ : int = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__magic_name__ : List[str] = 0
__magic_name__ : Optional[int] = defaultdict(__SCREAMING_SNAKE_CASE )
__magic_name__ : str = self.config.model_type
__magic_name__ : Optional[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__magic_name__ : str = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
__magic_name__ : Union[str, Any] = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__magic_name__ : int = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__magic_name__ : List[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__magic_name__ : Tuple = get_git_info()["""repo_sha"""]
__magic_name__ : Tuple = hparams.num_workers
__magic_name__ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ):
__magic_name__ : int = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__magic_name__ : List[Any] = self.decoder_start_token_id
__magic_name__ : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__magic_name__ : List[str] = False
__magic_name__ : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__magic_name__ : Tuple = self.hparams.eval_max_gen_length
else:
__magic_name__ : int = self.model.config.max_length
__magic_name__ : str = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__SCREAMING_SNAKE_CASE , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__magic_name__ : str = True
return readable_batch
def __magic_name__ ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
return self.model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = self.tokenizer.batch_decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return lmap(str.strip , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = self.tokenizer.pad_token_id
__magic_name__ ,__magic_name__ : List[str] = batch["""input_ids"""], batch["""attention_mask"""]
__magic_name__ : Optional[int] = batch["""labels"""]
if isinstance(self.model , __SCREAMING_SNAKE_CASE ):
__magic_name__ : Union[str, Any] = self.model._shift_right(__SCREAMING_SNAKE_CASE )
else:
__magic_name__ : List[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__magic_name__ : Optional[int] = decoder_input_ids
self.save_readable_batch(__SCREAMING_SNAKE_CASE )
__magic_name__ : Union[str, Any] = self(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
__magic_name__ : List[str] = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__magic_name__ : str = nn.CrossEntropyLoss(ignore_index=__SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
__magic_name__ : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__magic_name__ : Tuple = nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
__magic_name__ ,__magic_name__ : Dict = label_smoothed_nll_loss(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.hparams.label_smoothing , ignore_index=__SCREAMING_SNAKE_CASE )
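            # Note (explanatory comment, not original code): label_smoothed_nll_loss
            # blends the gold-token NLL with a uniform penalty over the vocabulary,
            # roughly loss = (1 - eps) * nll_loss + eps * smoothed_loss, which keeps
            # the model from putting all probability mass on a single token.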
return (loss,)
@property
def __magic_name__ ( self ) -> List[Any]:
return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path):
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs
        )
        return dataset

    def get_dataloader(self, type_path, batch_size, shuffle=False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False,
                num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle,
                num_workers=self.num_workers, sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        return self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--task", type=str, default="summarization", required=False)
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    """simple docstring"""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
__magic_name__: Optional[Any] = argparse.ArgumentParser()
__magic_name__: str = pl.Trainer.add_argparse_args(parser)
__magic_name__: Dict = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__magic_name__: Tuple = parser.parse_args()
main(args)
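# Usage sketch (illustrative, not from the original script; the data dir, output dir,
# checkpoint name, and flags from the generic argument set are assumptions):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./runs/summ \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 --gpus 1 --do_predict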
| 352 |
__magic_name__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__: Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 0 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    '''simple docstring'''
    # normal_gradient = gradient of the normal at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    '''simple docstring'''
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
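    # Known Project Euler 144 result: solution() returns 354 for these defaults.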
| 168 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """simple docstring"""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
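    # Prints ten lines, from "5 * 1 = 5" through "5 * 10 = 50".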
| 279 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    '''simple docstring'''

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    '''simple docstring'''

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
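# With n leaves, update() and query_range() touch O(log n) nodes,
# while traverse() yields all O(n) nodes in breadth-first order.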
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 130 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    '''simple docstring'''

    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False):
        # We evaluate on the dev set, so map "test" to the cached "dev" features.
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length', default=128, type=int, help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ), )
        parser.add_argument(
            '--task', default='', type=str, required=True, help='The GLUE task to run', )
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none', )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results', f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
    main()
| 130 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''')
        return input_ids
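# Usage sketch (checkpoint name taken from the pretrained map above; the exact ids
# depend on the downloaded vocabulary):
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello there")["input_ids"]  # ends with tokenizer.eos_token_id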
| 241 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
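# Note: sqlite_path and set_sqlalchemy_silence_uber_warning are pytest fixtures assumed
# to be provided by the datasets test suite's conftest; run these tests with plain
# pytest from that repository.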
| 20 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu",
        untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512,
        reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False,
        summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1,
        start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.', FutureWarning,)
            use_mems_eval = kwargs['use_cache']

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 107 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['modeling_data2vec_text'] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['modeling_data2vec_vision'] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
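# With this lazy pattern, an import such as `from . import Data2VecTextConfig`
# only loads the real submodule on first attribute access, keeping package import cheap.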
| 107 | 1 |
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 173 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray_img, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray_img, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray_img, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray_img)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
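# These are pytest-style tests; run them from the repository root (path assumed), e.g.:
#   pytest digital_image_processing/test_digital_image_processing.py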
| 173 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ort.SessionOptions()
_lowerCAmelCase : Dict = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : Dict = init_image.resize((768, 512))
# using the PNDM scheduler by default
_lowerCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : str = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=__a, output_type="np", )
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : int = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : Tuple = init_image.resize((768, 512))
_lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=__a, safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Optional[int] = pipe(
prompt=__a, image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=__a, output_type="np", )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : Any = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 300 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
| 300 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
_lowerCAmelCase : int = {
'''gpt2''': 1_024,
'''gpt2-medium''': 1_024,
'''gpt2-large''': 1_024,
'''gpt2-xl''': 1_024,
'''distilgpt2''': 1_024,
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = GPTaTokenizer
def __init__( self :Tuple , snake_case :Optional[int]=None , snake_case :Union[str, Any]=None , snake_case :Tuple=None , snake_case :Optional[Any]="<|endoftext|>" , snake_case :List[str]="<|endoftext|>" , snake_case :Tuple="<|endoftext|>" , snake_case :Dict=False , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , unk_token=snake_case , bos_token=snake_case , eos_token=snake_case , add_prefix_space=snake_case , **snake_case , )
A_ : List[Any] = kwargs.pop("add_bos_token" , snake_case )
A_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
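        # GPT-2's byte-level BPE treats a leading space as part of a token, so
        # the `add_prefix_space` flag must stay in sync with the serialized
        # pre-tokenizer state; the block below rebuilds the pre-tokenizer when
        # the two disagree.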
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : Optional[int] = getattr(snake_case , pre_tok_state.pop("type" ) )
A_ : Any = add_prefix_space
A_ : Optional[Any] = pre_tok_class(**snake_case )
A_ : Optional[int] = add_prefix_space
def SCREAMING_SNAKE_CASE ( self :Optional[int] , *snake_case :Any , **snake_case :int ):
'''simple docstring'''
A_ : int = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , *snake_case :Optional[Any] , **snake_case :Dict ):
'''simple docstring'''
A_ : int = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : List[str] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :"Conversation" ):
'''simple docstring'''
A_ : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
A_ : Dict = input_ids[-self.model_max_length :]
return input_ids
| 300 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''data2vec-vision'''
def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : str = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Optional[Any] = use_absolute_position_embeddings
A_ : Optional[int] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Any = layer_scale_init_value
A_ : Optional[Any] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : str = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : List[str] = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : List[str] = auxiliary_concat_input
A_ : Optional[int] = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
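        # absolute tolerance used by `validate_model_outputs` when comparing
        # the exported ONNX model against the reference PyTorch outputs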
return 1e-4
| 300 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_SCREAMING_SNAKE_CASE : List[Any] = frozenset(["prompt", "negative_prompt"])
_SCREAMING_SNAKE_CASE : Optional[Any] = frozenset([])
_SCREAMING_SNAKE_CASE : List[str] = frozenset(["image"])
_SCREAMING_SNAKE_CASE : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE : Optional[int] = frozenset(["image"])
_SCREAMING_SNAKE_CASE : Any = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_SCREAMING_SNAKE_CASE : Any = frozenset(["prompt", "image", "negative_prompt"])
_SCREAMING_SNAKE_CASE : Dict = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
_SCREAMING_SNAKE_CASE : Dict = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE : List[Any] = frozenset(["image", "mask_image"])
_SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE : Dict = frozenset(["example_image", "image", "mask_image"])
_SCREAMING_SNAKE_CASE : List[str] = frozenset(["class_labels"])
_SCREAMING_SNAKE_CASE : Optional[Any] = frozenset(["class_labels"])
_SCREAMING_SNAKE_CASE : List[Any] = frozenset(["batch_size"])
_SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset([])
_SCREAMING_SNAKE_CASE : Dict = frozenset(["batch_size"])
_SCREAMING_SNAKE_CASE : int = frozenset([])
_SCREAMING_SNAKE_CASE : str = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_SCREAMING_SNAKE_CASE : Optional[int] = frozenset(["prompt", "negative_prompt"])
_SCREAMING_SNAKE_CASE : Optional[Any] = frozenset(["input_tokens"])
_SCREAMING_SNAKE_CASE : Dict = frozenset(["input_tokens"])
| 92 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCamelCase_( snake_case : str , snake_case : complex , snake_case : str = "x" , snake_case : float = 1_0**-1_0 , snake_case : int = 1 , ):
'''simple docstring'''
snake_case_ = symbols(snake_case )
snake_case_ = lambdify(snake_case , snake_case )
snake_case_ = lambdify(snake_case , diff(snake_case , snake_case ) )
snake_case_ = starting_point
while True:
if diff_function(snake_case ) != 0:
snake_case_ = prev_guess - multiplicity * func(snake_case ) / diff_function(
snake_case )
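            # multiplicity > 1 gives the modified Newton step
            # x_{n+1} = x_n - m * f(x_n) / f'(x_n), which restores quadratic
            # convergence at a root of known multiplicity m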
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
snake_case_ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 92 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ : List[str] = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 |
from math import loga
def A_ ( a ) -> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
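# Example: 36 is 0b100100, so 36 & -36 == 4 isolates the lowest set bit and
# log2(4) == 2, i.e. the function returns the number of trailing zeros.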
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Tuple:
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> Tuple:
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_ = metric_id
class lowerCamelCase_ :
'''simple docstring'''
lowercase_ = [MetricMock(_SCREAMING_SNAKE_CASE ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def lowerCAmelCase_ ( self : Any ):
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) -> str:
if "tmp_path" in args:
SCREAMING_SNAKE_CASE_ = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(__UpperCAmelCase , match='https://huggingface.co/docs/evaluate' ):
func(*__UpperCAmelCase ) | 210 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[int]=10_000 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=2_048 , _lowerCAmelCase : str=4 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any="relu" , _lowerCAmelCase : Any=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Optional[int]=6_000 , _lowerCAmelCase : Tuple=1_024 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : str=(5, 5) , _lowerCAmelCase : Optional[int]=1_024 , _lowerCAmelCase : List[Any]=80 , _lowerCAmelCase : List[Any]=1 , **_lowerCAmelCase : List[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) | 210 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def a_ ( ) -> Tuple:
"""simple docstring"""
snake_case__ = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=_A )
snake_case__ = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=_A )
env_command_parser(subparsers=_A )
launch_command_parser(subparsers=_A )
tpu_command_parser(subparsers=_A )
test_command_parser(subparsers=_A )
# Let's go
snake_case__ = parser.parse_args()
if not hasattr(_A , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(_A )
if __name__ == "__main__":
main()
| 307 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : Tuple = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 1 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def a__ ( lowercase : List[str], lowercase : Any ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = k_size // 2
_UpperCamelCase , _UpperCamelCase = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
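    # mgrid yields, for every kernel cell, its row/column offset from the
    # kernel center, so the Gaussian can be evaluated on the whole grid at once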
_UpperCamelCase = 1 / (2 * pi * sigma) * exp(-(square(lowercase ) + square(lowercase )) / (2 * square(lowercase )) )
return g
def a__ ( lowercase : str, lowercase : Union[str, Any], lowercase : List[str] ) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = image.shape[0], image.shape[1]
# dst image height and width
_UpperCamelCase = height - k_size + 1
_UpperCamelCase = width - k_size + 1
    # im2col: unroll every k_size x k_size window of the image into one row of the matrix below
_UpperCamelCase = zeros((dst_height * dst_width, k_size * k_size) )
_UpperCamelCase = 0
for i, j in product(range(lowercase ), range(lowercase ) ):
_UpperCamelCase = ravel(image[i : i + k_size, j : j + k_size] )
_UpperCamelCase = window
row += 1
# turn the kernel into shape(k*k, 1)
_UpperCamelCase = gen_gaussian_kernel(lowercase, lowercase )
_UpperCamelCase = ravel(lowercase )
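    # with the kernel flattened, the whole filtering pass reduces to a single
    # matrix-vector product against the im2col matrix built above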
# reshape and get the dst image
_UpperCamelCase = dot(lowercase, lowercase ).reshape(lowercase, lowercase ).astype(lowercase )
return dst
if __name__ == "__main__":
# read original image
lowercase__ : Any = imread(R'../image_data/lena.jpg')
# turn image in gray scale value
lowercase__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowercase__ : int = gaussian_filter(gray, 3, sigma=1)
lowercase__ : Union[str, Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 361 |
'''simple docstring'''
from math import isclose, sqrt
def a__ ( lowercase : float, lowercase : float, lowercase : float ) -> tuple[float, float, float]:
"""simple docstring"""
_UpperCamelCase = point_y / 4 / point_x
_UpperCamelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_UpperCamelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_UpperCamelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_UpperCamelCase = outgoing_gradient**2 + 4
_UpperCamelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_UpperCamelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100
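    # substituting y = m*x + c (with c = point_y - m*point_x) into the ellipse
    # y^2 + 4x^2 = 100 expands to (m^2 + 4)*x^2 + 2*m*c*x + (c^2 - 100) = 0,
    # which is exactly the quadratic whose two roots are taken below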
_UpperCamelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_UpperCamelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_UpperCamelCase = x_minus if isclose(lowercase, lowercase ) else x_plus
_UpperCamelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a__ ( lowercase : float = 1.4, lowercase : float = -9.6 ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = first_x_coord
_UpperCamelCase = first_y_coord
_UpperCamelCase = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = next_point(lowercase, lowercase, lowercase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287 | 0 |
# flake8: noqa
# Lint as: python3
UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 87 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE__ = {value: key for key, value in MORSE_CODE_DICT.items()}
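# Decryption simply inverts the table; this is safe because every character
# maps to a unique Morse sequence.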
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """Morse code here!"""
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = encrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = decrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 46 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class __a ( datasets.BuilderConfig ):
__lowercase : int = 1_0000
__lowercase : Optional[List[str]] = None
__lowercase : Optional[datasets.Features] = None
class __a ( datasets.ArrowBasedBuilder ):
__lowercase : Optional[Any] = ParquetConfig
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowercase__: List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
lowercase__: Union[str, Any] = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase__: Union[str, Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
lowercase__: Tuple = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase__: int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
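                # the schema is read from the first readable file only; every
                # file in the split is assumed to share the same Arrow schema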
for file in itertools.chain.from_iterable(lowerCAmelCase__ ):
with open(lowerCAmelCase__ , 'rb' ) as f:
lowercase__: Any = datasets.Features.from_arrow_schema(pq.read_schema(lowerCAmelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase__: List[Any] = table_cast(lowerCAmelCase__ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
with open(lowerCAmelCase__ , 'rb' ) as f:
lowercase__: int = pq.ParquetFile(lowerCAmelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowercase__: Tuple = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}' )
raise
| 288 |
import torch
from diffusers import StableDiffusionPipeline
__lowerCAmelCase = '''path-to-your-trained-model'''
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
__lowerCAmelCase = '''A photo of sks dog in a bucket'''
__lowerCAmelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 288 | 1 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
__A : List[str] = True
from torch.cuda.amp import autocast
__A : Any = logging.getLogger(__name__)
def lowercase ( __snake_case : Any=None , __snake_case : Union[str, Any]=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE_ : Optional[bool] = field(
default=_A , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_A , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_A , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_A , metadata={"help": "The number of processes to use for the preprocessing."} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_A , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_A , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : WavaVecaProcessor
SCREAMING_SNAKE_CASE_ : Union[bool, str] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
def __call__( self : Optional[int] , A : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
lowercase_ : List[Any] = [{'''input_values''': feature['''input_values''']} for feature in features]
lowercase_ : Optional[int] = [{'''input_ids''': feature['''labels''']} for feature in features]
lowercase_ : Tuple = self.processor.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
lowercase_ : Any = self.processor.pad(
labels=A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
lowercase_ : Tuple = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
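        # -100 is the conventional "ignore" label id in transformers (and the
        # default ignore_index of torch.nn.CrossEntropyLoss); the model drops
        # these positions from the CTC loss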
lowercase_ : List[Any] = labels
return batch
class _UpperCAmelCase ( _A ):
def A ( self : Optional[int] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
lowercase_ : Dict = self._prepare_inputs(A )
if self.use_amp:
with autocast():
lowercase_ : Union[str, Any] = self.compute_loss(A , A )
else:
lowercase_ : List[str] = self.compute_loss(A , A )
if self.args.n_gpu > 1:
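            # under nn.DataParallel the forward pass returns one loss per
            # replica, so reduce them according to the configured CTC reduction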
if model.module.config.ctc_loss_reduction == "mean":
lowercase_ : Tuple = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase_ : Optional[int] = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
                raise ValueError(F'''{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowercase_ : Union[str, Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
return loss.detach()
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase_ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log a short summary on each process:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowercase_ : Tuple = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
lowercase_ : Tuple = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
lowercase_ : Optional[Any] = F'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(__snake_case : Dict ):
lowercase_ : Optional[int] = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
lowercase_ : Optional[Any] = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
lowercase_ : Union[str, Any] = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case : Optional[int] ):
lowercase_ : str = ''' '''.join(batch['''text'''] )
lowercase_ : Optional[int] = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowercase_ : int = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
lowercase_ : Any = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
lowercase_ : int = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
lowercase_ : Optional[int] = {v: k for k, v in enumerate(__snake_case )}
lowercase_ : List[Any] = vocab_dict[''' ''']
del vocab_dict[" "]
lowercase_ : int = len(__snake_case )
lowercase_ : Tuple = len(__snake_case )
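    # note: in the original fine-tuning recipe these steps remap the space
    # character to the word delimiter "|" and append "[UNK]"/"[PAD]" entries
    # before the vocab is dumped for the CTC tokenizer built below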
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ : Any = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
lowercase_ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
lowercase_ : str = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
lowercase_ : Any = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowercase_ : int = min(len(__snake_case ) , data_args.max_train_samples )
lowercase_ : List[Any] = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
lowercase_ : int = eval_dataset.select(range(data_args.max_val_samples ) )
lowercase_ : Tuple = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case : List[str] ):
lowercase_ , lowercase_ : List[Any] = torchaudio.load(batch['''path'''] )
lowercase_ : Any = resampler(__snake_case ).squeeze().numpy()
lowercase_ : Any = 1_6_0_0_0
lowercase_ : Any = batch['''text''']
return batch
lowercase_ : List[str] = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowercase_ : List[str] = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case : int ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
lowercase_ : str = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
lowercase_ : str = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
lowercase_ : Any = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowercase_ : Any = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case : Tuple ):
lowercase_ : str = pred.predictions
lowercase_ : Optional[Any] = np.argmax(__snake_case , axis=-1 )
lowercase_ : int = processor.tokenizer.pad_token_id
lowercase_ : List[Any] = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
lowercase_ : Union[str, Any] = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
lowercase_ : List[str] = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowercase_ : int = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
lowercase_ : Any = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase_ : List[str] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowercase_ : List[str] = model_args.model_name_or_path
else:
lowercase_ : Tuple = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowercase_ : Tuple = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
lowercase_ : List[str] = train_result.metrics
lowercase_ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
lowercase_ : Union[str, Any] = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
lowercase_ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase_ : Tuple = trainer.evaluate()
lowercase_ : Tuple = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
lowercase_ : List[str] = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 33 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
def __init__( self : int , A : Tuple , A : int=3 , A : List[str]=32 , A : Dict=3 , A : Any=10 , A : Dict=[10, 20, 30, 40] , A : Optional[Any]=[1, 1, 2, 1] , A : Union[str, Any]=True , A : Optional[Any]=True , A : Any="relu" , A : Optional[Any]=3 , A : Tuple=None , ) -> Dict:
lowercase_ : str = parent
lowercase_ : List[Any] = batch_size
lowercase_ : Optional[int] = image_size
lowercase_ : int = num_channels
lowercase_ : int = embeddings_size
lowercase_ : str = hidden_sizes
lowercase_ : List[str] = depths
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : Any = hidden_act
lowercase_ : List[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Optional[Any] = len(A )
def A ( self : str ) -> Tuple:
lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Union[str, Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A ( self : Dict ) -> int:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self : str , A : Tuple , A : str , A : str ) -> str:
lowercase_ : str = TFResNetModel(config=A )
lowercase_ : Union[str, Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Any , A : int , A : List[Any] , A : Optional[Any] ) -> Optional[Any]:
lowercase_ : Tuple = self.num_labels
lowercase_ : Union[str, Any] = TFResNetForImageClassification(A )
lowercase_ : Tuple = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Union[str, Any] ) -> Tuple:
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs
lowercase_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Any = False
def A ( self : Union[str, Any] ) -> List[Any]:
lowercase_ : int = TFResNetModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=A , has_text_modality=A )
def A ( self : Dict ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Dict ) -> List[Any]:
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def A ( self : Any ) -> Any:
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def A ( self : List[str] ) -> Optional[Any]:
pass
def A ( self : str ) -> Tuple:
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : int = model_class(A )
lowercase_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : str = [*signature.parameters.keys()]
lowercase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def A ( self : List[str] ) -> Tuple:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 33 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
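# A minimal sanity check of the one-hot cross-entropy above (toy values, not
# from the original script; assumes jax is installed). Each row's loss equals
# -log_softmax at its true class.
import jax
import jax.numpy as jnp

_logits = jnp.array([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
_labels = jnp.array([0, 1])
_one_hot = (_labels[..., None] == jnp.arange(_logits.shape[-1])[None]).astype("f4")
_loss = -jnp.sum(_one_hot * jax.nn.log_softmax(_logits, axis=-1), axis=-1)
# _loss[i] == -log_softmax(_logits)[i, _labels[i]]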
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
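# Hedged usage sketch of get_batched_dataset. A datasets.Dataset supports
# len() and slice indexing that returns a dict of columns; the tiny stand-in
# below only mimics that interface. Note the trailing partial batch is
# dropped by the integer division above.
class _ToyDataset:
    def __init__(self, input_ids):
        self._input_ids = input_ids

    def __len__(self):
        return len(self._input_ids)

    def __getitem__(self, key):
        return {"input_ids": self._input_ids[key]}

for _batch in get_batched_dataset(_ToyDataset([[1], [2], [3], [4], [5]]), batch_size=2):
    print(_batch)  # two batches of two rows each; the fifth row is dropped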
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
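# Quick check of the joined schedule above (toy numbers, not the script's
# defaults): the learning rate ramps linearly from init_lr to lr over
# warmup_steps, then decays linearly toward 1e-7 over the remaining steps.
_schedule = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
print(_schedule(0), _schedule(100), _schedule(1000))  # ~0.0, ~3e-5, ~1e-7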
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 139 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping(self) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
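# Hedged usage sketch of the template above: align_with_features swaps the
# generic ClassLabel placeholder for the dataset's own label feature. The
# feature schema below is illustrative, not taken from a real dataset.
_features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
_task = AudioClassification()
_aligned = _task.align_with_features(_features)
print(_aligned.label_schema["labels"].names)  # ['dog', 'cat']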
| 139 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
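# The first docstring example above, reproduced directly with scikit-learn,
# which _compute delegates to:
print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54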
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 19 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src, dst):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
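# Quick demonstration of extract_path_from_uri: remote URIs lose their
# protocol prefix, local paths pass through untouched.
assert extract_path_from_uri("s3://my-bucket/data/train") == "my-bucket/data/train"
assert extract_path_from_uri("/local/data/train") == "/local/data/train"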
| 88 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
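# Toy illustration of the default attention mask built above (assumes
# TensorFlow is installed): positions equal to the pad token id (1 here,
# matching the tester's pad_token_id) map to 0, everything else to 1.
_ids = tf.constant([[5, 6, 2, 1, 1]])
print(tf.cast(tf.math.not_equal(_ids, 1), tf.int8).numpy())  # [[1 1 1 0 0]]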
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
@cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
self._assert_generated_batch_equal_expected()
| 88 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
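# Illustration of the special-token layouts built by
# build_inputs_with_special_tokens above, using placeholder ids (2 and 3
# stand in for [CLS] and [SEP]; they are not ALBERT's real ids):
_cls, _sep = [2], [3]
_seq_a, _seq_b = [10, 11], [20]
print(_cls + _seq_a + _sep)                  # [CLS] A [SEP]
print(_cls + _seq_a + _sep + _seq_b + _sep)  # [CLS] A [SEP] B [SEP]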
| 217 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
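# Sanity check from the Project Euler 25 statement: the 12th term, 144, is the
# first Fibonacci number to contain three digits.
assert solution(3) == 12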
| 94 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
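# Example: the symbol pairs the BPE loop below repeatedly scores and merges
# ("er</w>" carries the end-of-word marker this tokenizer appends):
print(get_pairs(("l", "o", "w", "er</w>")))
# -> {('l', 'o'), ('o', 'w'), ('w', 'er</w>')} (set ordering may vary)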
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 301 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 218 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 220 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 262 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
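# Illustration of the segment-id layout produced by
# create_token_type_ids_from_sequences above, with toy ids (101/102 are
# BERT-style stand-ins for [CLS]/[SEP], assumed for illustration only):
_cls, _sep = [101], [102]
_a, _b = [7, 8], [9]
print(len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1])  # [0, 0, 0, 0, 1, 1]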
| 262 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2))
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int)
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int)
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower())
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower())
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode)
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2)
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default")
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.")
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1)
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision)
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file)
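

# Illustrative only: a hand-built config equivalent to what the interactive flow above returns,
# sketched under the assumption that SageMakerConfig accepts exactly these keyword arguments.
def _example_static_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )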
| 78 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 139 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [-4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
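

# Illustrative only: the encode/decode round trip the tests above exercise, sketched with a
# randomly initialized (untrained) AutoencoderKL so it runs without any checkpoint.
def _example_vae_roundtrip():
    model = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    image = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = model.encode(image).latent_dist.sample()
        reconstruction = model.decode(latents).sample
    # With two encoder blocks the spatial size halves once: latents are (1, 4, 16, 16).
    return latents.shape, reconstruction.shape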
| 139 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
_lowerCamelCase : Any = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
_lowerCamelCase : Dict = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
_lowerCamelCase : List[str] = BeautifulSoup(res.text, 'html.parser')
_lowerCamelCase : Dict = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 258 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
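

# Illustrative only: a toy greedy segmenter showing why "lower" maps to ["low", "er</w>"] given
# the fixture vocab above; real BPE instead replays learned merge rules by rank.
def _toy_segment(word, vocab):
    text = word + "</w>"
    out = []
    while text:
        match = next((v for v in sorted(vocab, key=len, reverse=True) if text.startswith(v)), None)
        if match is None:
            return ["<unk>"]
        out.append(match)
        text = text[len(match):]
    return out


# _toy_segment("lower", ["low", "er</w>", "l", "o", "w", "e", "r"]) -> ["low", "er</w>"]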
| 258 | 1 |
"""simple docstring"""
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)

    return path[::-1]
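

# Note: the heuristic above is the *squared* Euclidean distance, which can overestimate the true
# remaining cost, so this A* variant is not guaranteed to return shortest paths.
# Illustrative only: with unit-cost 8-connected moves (as produced by get_neigbours), the
# Chebyshev distance is an admissible alternative.
def chebyshev_heuristic(cell, goal):
    xa, ya = cell.position
    xb, yb = goal.position
    return max(abs(xa - xb), abs(ya - yb))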
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 296 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
    from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
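

# Illustrative only: the encode -> forward -> decode flow of the tool above, assuming PIL is
# installed and the default BLIP checkpoint can be downloaded; setup() lazily loads the model.
def _example_caption(image_path):
    from PIL import Image

    tool = ImageCaptioningTool()
    tool.setup()
    inputs = tool.encode(Image.open(image_path))
    outputs = tool.forward(inputs)
    return tool.decode(outputs)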
| 296 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
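
# Illustrative only: the default conv strides multiply to 320, i.e. one encoder frame per
# 320 raw samples (~20 ms at 16 kHz).
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320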
| 167 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
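

# Illustrative only: XLNet appends <sep>/<cls> at the *end* (unlike BERT's leading [CLS]), so a
# pair is laid out as A + <sep> + B + <sep> + <cls> with segment ids 0, 1 and 2 for <cls>.
def _example_pair_layout(tokenizer, ids_a, ids_b):
    input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    return input_ids, type_ids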
| 167 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 231 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
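# For context: a minimal sketch of the lazy-import trick that _LazyModule
# implements, reduced to its core. Attribute access triggers the real import
# on first use. This class is illustrative only and is not used by this module.
import importlib

class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._structure = import_structure  # {submodule: [exported names]}
    def __getattr__(self, attr):
        for submodule, exported in self._structure.items():
            if attr in exported:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self._name!r} has no attribute {attr!r}")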
| 231 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
UpperCAmelCase_ : Tuple = Vector()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(lowercase_ ) , "(0,0,0,0,0,1)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(lowercase_ ) , 4 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Vector([1, 2] )
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4, 5] )
UpperCAmelCase_ : str = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : Optional[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : Tuple = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Vector([1, 2, 3] )
UpperCAmelCase_ : List[str] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : int = Vector([2, -1, 4] ) # for test of dot product
UpperCAmelCase_ : int = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , lowercase_ , lowercase_ ) ) , "(3,4,7)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Vector([1, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : Optional[Any] = x.copy()
self.assertEqual(str(lowercase_ ) , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(lowercase_ ) , "(0,1,0)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(lowercase_ , lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(lowercase_ , lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
UpperCAmelCase_ : int = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
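# A quick hand computation backing two of the expectations above (standalone,
# no dependency on the lib module): axpy(2, (1,2,3), (1,0,1)) = (3,4,7) and
# |(1,2,3)| = sqrt(14) ≈ 3.742.
import math

assert [2 * a + b for a, b in zip([1, 2, 3], [1, 0, 1])] == [3, 4, 7]
assert round(math.sqrt(sum(c * c for c in [1, 2, 3])), 3) == 3.742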
| 61 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function : str , starting_point : complex , variable : str = "x" , precision : float = 10**-10 , multiplicity : int = 1 , ) -> complex:
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
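# The same update rule without sympy, as a minimal float-only sketch:
# x_{n+1} = x_n - m * f(x_n) / f'(x_n), here with m = 1 and an analytic
# derivative supplied by the caller.
def _newton_float(f, f_prime, x0, precision=10**-10):
    x = x0
    while True:
        step = f(x) / f_prime(x)
        if abs(step) < precision:
            return x - step
        x -= step

# sqrt(2) as the root of f(x) = x**2 - 2:
assert abs(_newton_float(lambda x: x * x - 2, lambda x: 2 * x, 1.0) - 2**0.5) < 1e-9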
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 190 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase_ : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split("." ):
_a = getattr(lowercase , lowercase )
if weight_type is not None:
_a = getattr(lowercase , lowercase ).shape
else:
_a = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
else:
_a = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.feature_extractor
_a = hf_model.adapter
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , )
_a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(lowercase , lowercase , lowercase , lowercase )
_a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_a = True
if "*" in mapped_key:
_a = name.split(lowercase )[0].split("." )[-2]
_a = mapped_key.replace("*" , lowercase )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
else:
_a = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_a = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_a = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_a = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_a = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
def load_adapter( full_name , value , adapter , unused_weights ):
_a = full_name.split("adaptor." )[-1]
_a = name.split("." )
if items[1].isdigit():
_a = int(items[1] )
else:
_a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
_a = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
_a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
_a = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
_a = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(lowercase , lowercase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
_a = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
_a = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
_a = WavaVecaConfig.from_pretrained(
lowercase , add_adapter=lowercase , adapter_stride=lowercase , adapter_kernel_size=lowercase , use_auth_token=lowercase , output_hidden_size=lowercase , )
_a = MBartConfig.from_pretrained(lowercase )
# load model
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
_a = model[0].eval()
# load feature extractor
_a = WavaVecaFeatureExtractor.from_pretrained(lowercase , use_auth_token=lowercase )
# set weights for wav2vec2 encoder
_a = WavaVecaModel(lowercase )
recursively_load_weights_wavaveca(model.encoder , lowercase )
# load decoder weights
_a = MBartForCausalLM(lowercase )
_a , _a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowercase )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
_a = SpeechEncoderDecoderModel(encoder=lowercase , decoder=lowercase )
_a = False
_a = MBartaaTokenizer(lowercase )
tokenizer.save_pretrained(lowercase )
_a = hf_wavavec.config.to_dict()
_a = tokenizer.pad_token_id
_a = tokenizer.bos_token_id
_a = tokenizer.eos_token_id
_a = "mbart50"
_a = "wav2vec2"
_a = tokenizer.eos_token_id
_a = 25_0004
_a = tokenizer.eos_token_id
_a = SpeechEncoderDecoderConfig.from_dict(lowercase )
hf_wavavec.save_pretrained(lowercase )
feature_extractor.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
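# The renaming loop above leans on "*" wildcards standing in for the layer
# index. A small standalone sketch of that idea (the key below is made up,
# mirroring the shape of the MAPPING table):
def _remap(name, mapping):
    for key, mapped_key in mapping.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None

assert (
    _remap("encoder.layers.3.fc1.weight", {"fc1": "encoder.layers.*.feed_forward.intermediate_dense"})
    == "encoder.layers.3.feed_forward.intermediate_dense"
)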
| 346 |
'''simple docstring'''
def solution( n : int = 6008_5147_5143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
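# Worked trace on the classic Project Euler example: solution(13195) strips
# the factors 5, 7, 13 and 29 in turn, so the largest prime factor is 29.
assert solution(13195) == 29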
| 346 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
_UpperCAmelCase : Dict = 3
def primitive_root( p_val ) -> int:
    '''simple docstring'''
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name , key_size ) -> None:
    '''simple docstring'''
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('\nWARNING:' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
    '''simple docstring'''
    print('Making key files...' )
    make_key_files('elgamal' , 2_048 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
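# A sketch of the round trip these keys enable (textbook ElGamal with tiny,
# insecure numbers chosen purely for illustration):
_p, _g, _d = 23, 5, 6                      # prime, primitive root, private exponent
_e2 = pow(_g, _d, _p)                      # public component
_k, _m = 3, 9                              # ephemeral key and message
_ca, _cb = pow(_g, _k, _p), (_m * pow(_e2, _k, _p)) % _p
assert (_cb * pow(_ca, _p - 1 - _d, _p)) % _p == _m   # Fermat inverse of c1**d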
| 222 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = R"\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsWarper :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsProcessorList ( list ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class FlaxTemperatureLogitsWarper ( FlaxLogitsWarper ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class FlaxTopPLogitsWarper ( FlaxLogitsWarper ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
class FlaxTopKLogitsWarper ( FlaxLogitsWarper ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class FlaxForcedBOSTokenLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class FlaxMinLengthLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class FlaxSuppressTokensLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class FlaxForceTokensLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor ( FlaxLogitsProcessor ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
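# A small sanity sketch of what the temperature warper above does, using only
# the jax/jnp imports at the top of this file (values are arbitrary):
if __name__ == "__main__":
    demo_scores = jnp.array([[2.0, 1.0, 0.0]])
    sharpened = jax.nn.softmax(demo_scores / 0.5, axis=-1)  # temperature < 1 sharpens
    baseline = jax.nn.softmax(demo_scores, axis=-1)
    assert float(sharpened[0, 0]) > float(baseline[0, 0])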
| 222 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = "informer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Any , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "student_t" , __lowerCamelCase : str = "nll" , __lowerCamelCase : int = 1 , __lowerCamelCase : List[int] = None , __lowerCamelCase : Optional[Union[str, bool]] = "mean" , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : bool = True , __lowerCamelCase : str = "gelu" , __lowerCamelCase : float = 0.05 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : str = "prob" , __lowerCamelCase : int = 5 , __lowerCamelCase : bool = True , **__lowerCamelCase : Union[str, Any] , ) -> str:
# time series specific configuration
A : List[Any] = prediction_length
A : Any = context_length or prediction_length
A : Optional[Any] = distribution_output
A : List[str] = loss
A : Union[str, Any] = input_size
A : List[Any] = num_time_features
A : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A : str = scaling
A : str = num_dynamic_real_features
A : Any = num_static_real_features
A : str = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A : Dict = cardinality
else:
A : str = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A : Optional[Any] = embedding_dimension
else:
A : Union[str, Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
A : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
A : List[Any] = d_model
A : int = encoder_attention_heads
A : int = decoder_attention_heads
A : Optional[Any] = encoder_ffn_dim
A : int = decoder_ffn_dim
A : List[str] = encoder_layers
A : List[str] = decoder_layers
A : int = dropout
A : Union[str, Any] = attention_dropout
A : int = activation_dropout
A : Any = encoder_layerdrop
A : Union[str, Any] = decoder_layerdrop
A : Any = activation_function
A : Dict = init_std
A : Optional[int] = use_cache
# Informer
A : Dict = attention_type
A : int = sampling_factor
A : Union[str, Any] = distil
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 256 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
"""simple docstring"""
def __init__( self : Tuple, _snake_case : int, _snake_case : Union[str, Any]=1_3, _snake_case : int=7, _snake_case : Tuple=True, _snake_case : Union[str, Any]=True, _snake_case : Dict=True, _snake_case : Optional[Any]=True, _snake_case : Tuple=9_9, _snake_case : Any=6_4, _snake_case : str=3_2, _snake_case : Optional[int]=5, _snake_case : Any=4, _snake_case : str=3_7, _snake_case : List[Any]="gelu", _snake_case : Any=0.1, _snake_case : List[str]=0.1, _snake_case : Union[str, Any]=5_1_2, _snake_case : int=1_6, _snake_case : Tuple=2, _snake_case : Union[str, Any]=0.0_2, _snake_case : str=3, _snake_case : str=4, _snake_case : Optional[int]=None, ) ->List[str]:
snake_case__ : List[Any] = parent
snake_case__ : str = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Dict = is_training
snake_case__ : int = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : int = use_labels
snake_case__ : str = vocab_size
snake_case__ : str = hidden_size
snake_case__ : Any = embedding_size
snake_case__ : Any = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : str = num_labels
snake_case__ : int = num_choices
snake_case__ : Any = scope
def lowercase_ ( self : int ) ->Optional[Any]:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Optional[Any] = None
if self.use_token_type_ids:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
snake_case__ : Dict = None
snake_case__ : Any = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case__ : Tuple = ids_tensor([self.batch_size], self.num_choices )
snake_case__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Optional[Any] ) ->List[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_snake_case, initializer_range=self.initializer_range, )
def lowercase_ ( self : Any, _snake_case : Union[str, Any], _snake_case : Optional[int], _snake_case : str, _snake_case : List[str], _snake_case : List[Any], _snake_case : List[Any], _snake_case : List[str] ) ->Tuple:
snake_case__ : Union[str, Any] = MobileBertModel(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[int] = model(_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case )
snake_case__ : Union[str, Any] = model(_snake_case, token_type_ids=_snake_case )
snake_case__ : List[str] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase_ ( self : Optional[Any], _snake_case : Optional[Any], _snake_case : List[Any], _snake_case : Optional[Any], _snake_case : Tuple, _snake_case : Optional[Any], _snake_case : Tuple, _snake_case : Dict ) ->int:
snake_case__ : Union[str, Any] = MobileBertForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[Any] = model(_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Union[str, Any], _snake_case : List[str], _snake_case : Tuple, _snake_case : List[str], _snake_case : List[str], _snake_case : Optional[int], _snake_case : Union[str, Any], _snake_case : Optional[int] ) ->List[Any]:
snake_case__ : List[Any] = MobileBertForNextSentencePrediction(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : List[str] = model(
_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def lowercase_ ( self : str, _snake_case : str, _snake_case : Union[str, Any], _snake_case : Any, _snake_case : Any, _snake_case : int, _snake_case : Dict, _snake_case : Any ) ->Union[str, Any]:
snake_case__ : str = MobileBertForPreTraining(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : int = model(
_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case, next_sentence_label=_snake_case, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def lowercase_ ( self : List[str], _snake_case : List[str], _snake_case : Optional[Any], _snake_case : List[Any], _snake_case : int, _snake_case : List[Any], _snake_case : Optional[Any], _snake_case : Union[str, Any] ) ->Dict:
snake_case__ : Any = MobileBertForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : str = model(
_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, start_positions=_snake_case, end_positions=_snake_case, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase_ ( self : List[Any], _snake_case : Optional[int], _snake_case : Tuple, _snake_case : str, _snake_case : Union[str, Any], _snake_case : Union[str, Any], _snake_case : Optional[Any], _snake_case : List[str] ) ->Any:
snake_case__ : Optional[int] = self.num_labels
snake_case__ : List[str] = MobileBertForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : str = model(_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : int, _snake_case : List[Any], _snake_case : Union[str, Any], _snake_case : Dict, _snake_case : int, _snake_case : Union[str, Any], _snake_case : List[Any], _snake_case : List[Any] ) ->Optional[Any]:
snake_case__ : Dict = self.num_labels
snake_case__ : Optional[int] = MobileBertForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[Any] = model(_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Optional[int], _snake_case : Optional[Any], _snake_case : str, _snake_case : Union[str, Any], _snake_case : Union[str, Any], _snake_case : int, _snake_case : Dict, _snake_case : Tuple ) ->Optional[Any]:
snake_case__ : List[str] = self.num_choices
snake_case__ : Optional[int] = MobileBertForMultipleChoice(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
snake_case__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
snake_case__ : List[str] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
snake_case__ : Tuple = model(
_snake_case, attention_mask=_snake_case, token_type_ids=_snake_case, labels=_snake_case, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase_ ( self : Union[str, Any] ) ->str:
snake_case__ : Tuple = self.prepare_config_and_inputs()
(
snake_case__
) : Union[str, Any] = config_and_inputs
snake_case__ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def lowercase_ ( self : Optional[Any], _snake_case : Optional[Any], _snake_case : List[str], _snake_case : Tuple=False ) ->List[Any]:
snake_case__ : Optional[int] = super()._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
snake_case__ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=_snake_case )
snake_case__ : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_snake_case )
return inputs_dict
def lowercase_ ( self : List[Any] ) ->Tuple:
snake_case__ : int = MobileBertModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self, config_class=_snake_case, hidden_size=3_7 )
def lowercase_ ( self : Dict ) ->Optional[int]:
self.config_tester.run_common_tests()
def lowercase_ ( self : int ) ->str:
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_snake_case )
def lowercase_ ( self : List[Any] ) ->List[Any]:
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_snake_case )
def lowercase_ ( self : Dict ) ->List[str]:
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_snake_case )
def lowercase_ ( self : Optional[int] ) ->Union[str, Any]:
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_snake_case )
def lowercase_ ( self : str ) ->str:
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_snake_case )
def lowercase_ ( self : Union[str, Any] ) ->Dict:
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_snake_case )
def lowercase_ ( self : Union[str, Any] ) ->str:
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_snake_case )
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_snake_case )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
snake_case__ : Optional[int] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_snake_case )
snake_case__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
snake_case__ : Any = model(_snake_case )[0]
snake_case__ : Tuple = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, _snake_case )
snake_case__ : List[Any] = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
], device=_snake_case, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
snake_case__ : Any = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
snake_case__ : str = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
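# The ratio check above in miniature: with values spanning 1e0 to 1e8, an
# absolute tolerance is meaningless, so each element must instead satisfy
# 1 - TOLERANCE < expected / actual < 1 + TOLERANCE (numbers illustrative):
_expected, _actual = 2.47e7, 2.4701e7
assert 1 - TOLERANCE < _expected / _actual < 1 + TOLERANCE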
| 277 |
"""simple docstring"""
def method_1( boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 1_0.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary , steps )
    print(f'''y = {y}''' )
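# Sanity check against the exact value: the integral of x**2 over [0, 1] is
# 1/3, and a 10-panel composite trapezoid h*(f(a)/2 + f(a+h) + ... + f(b)/2)
# gives 0.335 (error ~ h**2/12 * max|f''| ≈ 0.0017), computed here directly:
_h = 0.1
_approx = _h * (0.0 / 2 + sum((i * _h) ** 2 for i in range(1, 10)) + 1.0 / 2)
assert abs(_approx - 1 / 3) < 2e-3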
if __name__ == "__main__":
    main()
| 96 | 0 |
import socket
def main() -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B"""Hello server!""" )
    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
main()
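# The matching server side, sketched for completeness (same host/port
# conventions as the client above; the file name is illustrative):
def serve_file(filename="File_to_send", port=1_2_3_1_2):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)                      # accept a single client
    conn, _addr = server.accept()
    print(conn.recv(1_0_2_4).decode())    # "Hello server!"
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1_0_2_4):
            conn.sendall(chunk)
    conn.close()
    server.close()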
| 371 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline ( Pipeline ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
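# Typical usage goes through the pipeline factory rather than this class
# directly (checkpoint name illustrative; any image-classification model works):
#
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#     classifier("cats.jpg", top_k=3)
#     # -> [{"score": ..., "label": ...}, ...]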
| 32 | 0 |
class TrieNode:
    """simple docstring"""
    def __init__(self ):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self , words ):
        for word in words:
            self.insert(word )
    def insert(self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self , word ):
        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words( node: TrieNode , word: str ):
    '''simple docstring'''
    if node.is_leaf:
        print(word , end=" " )
    for key, value in node.nodes.items():
        print_words(value , word + key )

def test_trie():
    '''simple docstring'''
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("banana" )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    assert root.find("apple" )
    assert root.find("all" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True

def print_results( msg: str , passes: bool ):
    '''simple docstring'''
    print(str(msg ) , "works!" if passes else "doesn't work :(" )

def pytests():
    '''simple docstring'''
    assert test_trie()

def main():
    '''simple docstring'''
    print_results("Testing trie functionality" , test_trie() )

if __name__ == "__main__":
    main()
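# Editor's addition: `find` only matches complete inserted words. A prefix query is a
# natural companion; the sketch below walks the same `nodes` mapping but skips the
# `is_leaf` check. This is an illustrative extension, not part of the original file.
def starts_with(root: TrieNode, prefix: str) -> bool:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return False
        curr = curr.nodes[char]
    return True  # e.g. starts_with(root, "band") is True once "bandana" is inserted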
| 244 |
"""simple docstring"""
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Dict = []
snake_case : List[Any] = 1
while len(lowercase ) < 1e6:
constant.append(str(lowercase ) )
i += 1
snake_case : Tuple = "".join(lowercase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
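# Worked check (editor's addition): the Champernowne constant begins
# "123456789101112...", so index 0 holds "1" and index 9 holds the first digit of 10.
def _sanity_check() -> None:
    digits = "".join(str(i) for i in range(1, 16))
    assert digits == "123456789101112131415"
    assert digits[0] == "1" and digits[9] == "1" and digits[11] == "1"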
if __name__ == "__main__":
print(solution())
| 203 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_small_integration_test(self ):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
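# Editor's note: `loss` is the mean cross-entropy per target token, so
# `labels.shape[-1] * loss` is the total negative log-likelihood of the target
# sequence; negating it gives the log-likelihood score that the original
# mesh-tensorflow evaluation reports. Illustratively, 6 target tokens at a mean
# loss of ~14.15 gives score = -(6 * 14.15) ≈ -84.9, the magnitude asserted above.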
| 366 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
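# Editor's sketch (not part of the original file): with the `_LazyModule` wiring above,
# submodules are imported only on first attribute access, so e.g.
#
#     from transformers.models.yolos import YolosConfig              # loads configuration_yolos
#     from transformers.models.yolos import YolosForObjectDetection  # loads modeling_yolos, torch permitting
#
# keeps `import transformers` cheap even though the package exposes every model class.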
| 127 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )

        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip("Protein models do not support embedding resizing." )
    def test_resize_token_embeddings(self ):
        pass

    @unittest.skip("Protein models do not support embedding resizing." )
    def test_save_load_after_resize_token_embeddings(self ):
        pass

    def test_model_common_attributes(self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self ):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def test_inference_no_head(self ):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
 | 145 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase_ = "src/transformers"
# Matches is_xxx_available()
lowerCamelCase_ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowerCamelCase_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowerCamelCase_ = re.compile(r"^\s*try:")
# Catches a line with else:
lowerCamelCase_ = re.compile(r"^\s*else:")
def find_backend(line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else f'{key} backend'
            errors.append(f'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(f'- {module}' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'{list_of_modules}\n'
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
    check_submodules()
 | 239 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__( self , config_file_or_dict ):
        """simple docstring"""
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , "r" , encoding="utf-8" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("utf-8" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload( self ):
        """simple docstring"""
        self._stage = self.get_value("zero_optimization.stage" , -1 )

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"] )
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device" ),
                    self.get_value("zero_optimization.offload_param.device" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ):
        """simple docstring"""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key

        return config, ds_key
    def get_value( self , ds_key_long , default=None ):
        """simple docstring"""
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        """simple docstring"""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'Can\'t find {ds_key_long} entry in the config: {self.config}' )
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ):
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )

    def is_false( self , ds_key_long ):
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )

    def is_zero2( self ):
        """simple docstring"""
        return self._stage == 2

    def is_zero3( self ):
        """simple docstring"""
        return self._stage == 3

    def is_offload( self ):
        """simple docstring"""
        return self._offload
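# Minimal usage sketch (editor's addition; the config values are illustrative):
#
#     ds_config = HfDeepSpeedConfig(
#         {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#     )
#     ds_config.get_value("zero_optimization.stage")  # -> 3
#     ds_config.is_zero3()                            # -> True
#     ds_config.is_offload()                          # -> True ("cpu" is a valid offload device)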
class DeepSpeedEngineWrapper:
    def __init__( self , engine ):
        """simple docstring"""
        self.engine = engine

    def backward( self , loss , **kwargs ):
        """simple docstring"""
        self.engine.backward(loss , **kwargs )

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper( AcceleratedOptimizer ):
    def __init__( self , optimizer ):
        """simple docstring"""
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , "overflow" )

    def zero_grad( self , set_to_none=None ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step( self ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped( self ):
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper( AcceleratedScheduler ):
    def __init__( self , scheduler , optimizers ):
        """simple docstring"""
        super().__init__(scheduler , optimizers )

    def step( self ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

class DummyOptim:
    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs

class DummyScheduler:
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
 | 239 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    """simple docstring"""

    model_type = "mobilenet_v1"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4
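# Minimal usage sketch (editor's addition):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     config.model_type                      # -> "mobilenet_v1"
#     MobileNetV1Config(depth_multiplier=0)  # raises ValueError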
| 68 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline( Pipeline ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs , **model_inputs )

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
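# Minimal usage sketch (editor's addition; the model name, image URL and labels are
# illustrative assumptions, not taken from this file):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    result = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "dog", "car"],
    )
    print(result)  # sorted best-first, e.g. [{"score": 0.98, "label": "cat"}, ...]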
| 340 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir('''fixtures''')
class FeatureExtractorUtilTester( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        '''simple docstring'''
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url( self ):
        '''simple docstring'''
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class FeatureExtractorPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
        except HTTPError:
            pass

    def test_push_to_hub( self ):
        '''simple docstring'''
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''test-feature-extractor''' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

    def test_push_to_hub_in_organization( self ):
        '''simple docstring'''
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

    def test_push_to_hub_dynamic_feature_extractor( self ):
        '''simple docstring'''
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )

        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 334 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 334 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig( PretrainedConfig ):
    model_type = "sew"

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
 | 6 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    '''simple docstring'''
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["""model_config"""] )

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )

    tokenizer = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(entity_vocab , f )

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, ent2_emb] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + """w2e_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2w_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2e_""" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["""[MASK2]"""]] = entity_emb[entity_vocab["""[MASK]"""]]

    model = LukeModel(config=config ).eval()

    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids""" )
    if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
        raise ValueError(
            """Unexpected keys"""
            f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="""entity_classification""" )

    text = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="""pt""" )

    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 10_24) )
        expected_slice = torch.tensor(
            [[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 7_68) )
        expected_slice = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 10_24) )
        expected_slice = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 7_68) )
        expected_slice = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    '''simple docstring'''
    entity_vocab = {}
    with open(entity_vocab_path , """r""" , encoding="""utf-8""" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab
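# Editor's note: the entity vocabulary is a TSV with one entity per line; in the
# original LUKE release each line is "<title>\t<count>" (the second field is ignored
# here). Illustrative rows:
#
#     [PAD]	0
#     [MASK]	12
#     Ana Ivanovic	27
#
# `load_entity_vocab` maps each title to its line index, which is the entity id used
# to index the entity embedding matrix handled above.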
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 165 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( __magic_name__ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(__magic_name__ , __magic_name__ ) -> bool:
lowercase__ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowercase__ = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__magic_name__ ) )
# The ratio of the area for circle to square is pi/4.
lowercase__ = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
    print(f'''The math module value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 1.0 , ):
return mean(
function_to_integrate(uniform(__magic_name__ , __magic_name__ ) ) for _ in range(__magic_name__ ) ) * (max_value - min_value)
def _A ( __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 1.0 ):
def identity_function(__magic_name__ ) -> float:
return x
lowercase__ = area_under_curve_estimator(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def _A ( __magic_name__ ):
def function_to_integrate(__magic_name__ ) -> float:
return sqrt(4.0 - x * x )
lowercase__ = area_under_curve_estimator(
__magic_name__ , __magic_name__ , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
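# Self-contained restatement (not from the original) of the Monte Carlo idea
# above, with readable names; the sample count is an illustrative choice.
# Uniform points fill the square [-1, 1] x [-1, 1] (area 4), so the fraction
# landing inside the unit circle estimates pi / 4; squaring both sides of
# sqrt(x**2 + y**2) <= 1 lets us skip the square root.
def estimate_pi_sketch(num_samples: int = 100_000) -> float:
    from random import uniform

    hits = sum(
        1
        for _ in range(num_samples)
        if uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
    )
    return 4.0 * hits / num_samples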
| 201 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _A ( __magic_name__=32 , __magic_name__=10 , __magic_name__=100 , __magic_name__=1026 , __magic_name__=True , __magic_name__="data/tokenized_stories_train_wikitext103.jbl" , __magic_name__="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=1026 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
lowercase__ = load_gpta("gpt2" ).to(__magic_name__ )
print("computing perplexity on objective set" )
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print("perplexity on objective set:" , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _A ( __magic_name__ , __magic_name__=15 , __magic_name__=128 , __magic_name__=100 , __magic_name__="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowercase__ = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase__ = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=100 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=32 , __magic_name__=1000 , __magic_name__=16 , __magic_name__=1.0 , __magic_name__=recopy_gpta , __magic_name__=None , __magic_name__=10 , __magic_name__="gpt2_finetuned.pt" , ):
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowercase__ = RandomSampler(__magic_name__ )
lowercase__ = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase__ = max_steps // (len(__magic_name__ )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(__magic_name__ , labels=__magic_name__ )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
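                # Note: as written this is a hard step, not a smooth decay --
                # the threshold drops straight to -1 after ten filled batches.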
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
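# Hedged sketch (not part of the original script) of the filtering pattern the
# loop above implements: a secondary model scores each candidate context, and
# only contexts whose predicted information gain clears the current threshold
# reach the training batch. All names below are illustrative.
def filter_contexts_sketch(contexts, score_fn, threshold):
    """Keep only the contexts whose predicted IG clears the threshold."""
    return [context for context in contexts if score_fn(context) >= threshold]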
def _A ( ):
lowercase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__magic_name__ , default=__magic_name__ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__magic_name__ , default=__magic_name__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__magic_name__ , default=__magic_name__ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__magic_name__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__magic_name__ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__magic_name__ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__magic_name__ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__magic_name__ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__magic_name__ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__magic_name__ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__magic_name__ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__magic_name__ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__magic_name__ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__magic_name__ , type=__magic_name__ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__magic_name__ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__magic_name__ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__magic_name__ , type=__magic_name__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__magic_name__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowercase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowercase__ = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 201 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A: Dict = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A: str = logging.WARNING
def _snake_case ( ):
    UpperCAmelCase : Union[str, Any] = os.getenv("""DATASETS_VERBOSITY""" , None )  # None when the env var is unset
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option DATASETS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def _snake_case ( ):
return __name__.split(""".""" )[0]
def _snake_case ( ):
return logging.getLogger(_get_library_name() )
def _snake_case ( ):
# Apply our default configuration to the library root logger.
UpperCAmelCase : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _snake_case ( ):
UpperCAmelCase : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _snake_case ( UpperCamelCase : Optional[str] = None ):
if name is None:
UpperCAmelCase : int = _get_library_name()
return logging.getLogger(UpperCamelCase )
def _snake_case ( ):
return _get_library_root_logger().getEffectiveLevel()
def _snake_case ( UpperCamelCase : int ):
_get_library_root_logger().setLevel(UpperCamelCase )
def _snake_case ( ):
    # assumed level: INFO (mirrors datasets.utils.logging.set_verbosity_info)
    return set_verbosity(INFO )
def _snake_case ( ):
    # assumed level: WARNING (set_verbosity_warning)
    return set_verbosity(WARNING )
def _snake_case ( ):
    # assumed level: DEBUG (set_verbosity_debug)
    return set_verbosity(DEBUG )
def _snake_case ( ):
    # assumed level: ERROR (set_verbosity_error)
    return set_verbosity(ERROR )
def _snake_case ( ):
UpperCAmelCase : Optional[Any] = False
def _snake_case ( ):
UpperCAmelCase : List[Any] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class SCREAMING_SNAKE_CASE__ :
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase : str = args[0] if args else None
def __iter__( self ) -> List[Any]:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
def empty_fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> int:
'''simple docstring'''
return self
def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
return
A: List[Any] = True
class SCREAMING_SNAKE_CASE__ :
def __call__( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
else:
return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A: List[Any] = _tqdm_cls()
def _snake_case ( ):
global _tqdm_active
return bool(_tqdm_active )
def _snake_case ( ):
global _tqdm_active
UpperCAmelCase : Optional[Any] = True
def _snake_case ( ):
global _tqdm_active
UpperCAmelCase : Tuple = False
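# Hedged usage sketch: how a caller would typically drive this module. The
# readable names mirror the public helpers in datasets.utils.logging
# (set_verbosity_error, disable_progress_bar); treat the exact import path as
# an assumption here, since every definition above shares the name `_snake_case`.
#
# import datasets
# datasets.utils.logging.set_verbosity_error()   # emit only ERROR and above
# datasets.utils.logging.disable_progress_bar()  # swap tqdm for EmptyTqdm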
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
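# Hedged sketch of what the lazy-module pattern above buys: `import
# transformers` stays cheap, and the heavy torch modeling code is only pulled
# in when a name is first accessed (a from-import counts as that first
# access). The usage below is an illustrative assumption.
#
# import transformers
# config = transformers.TimesformerConfig()       # config module loads here
# model = transformers.TimesformerModel(config)   # modeling module loads here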
| 357 |
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ''
for i in table:
res += inp[i - 1]
return res
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return data[1:] + data[0]
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ''
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase__ ( __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = int('0b' + data[0] + data[-1] , 2 )
UpperCAmelCase_ : Any = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = message[:4]
UpperCAmelCase_ : Optional[int] = message[4:]
UpperCAmelCase_ : Union[str, Any] = apply_table(__snake_case , __snake_case )
UpperCAmelCase_ : Union[str, Any] = xor(__snake_case , __snake_case )
UpperCAmelCase_ : Dict = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
UpperCAmelCase_ : Any = apply_sbox(__snake_case , temp[4:] )
UpperCAmelCase_ : str = '0' * (2 - len(__snake_case )) + l # noqa: E741
UpperCAmelCase_ : int = '0' * (2 - len(__snake_case )) + r
UpperCAmelCase_ : int = apply_table(l + r , __snake_case )
UpperCAmelCase_ : int = xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
__UpperCAmelCase = input('Enter 10 bit key: ')
__UpperCAmelCase = input('Enter 8 bit message: ')
__UpperCAmelCase = [6, 3, 7, 4, 8, 5, 10, 9]
__UpperCAmelCase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__UpperCAmelCase = [2, 4, 3, 1]
__UpperCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__UpperCAmelCase = apply_table(key, paa_table)
__UpperCAmelCase = temp[:5]
__UpperCAmelCase = temp[5:]
__UpperCAmelCase = left_shift(left)
__UpperCAmelCase = left_shift(right)
__UpperCAmelCase = apply_table(left + right, pa_table)
__UpperCAmelCase = left_shift(left)
__UpperCAmelCase = left_shift(right)
__UpperCAmelCase = left_shift(left)
__UpperCAmelCase = left_shift(right)
__UpperCAmelCase = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase = apply_table(message, IP)
__UpperCAmelCase = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase = temp[4:] + temp[:4]
__UpperCAmelCase = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
__UpperCAmelCase = apply_table(CT, IP)
__UpperCAmelCase = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase = temp[4:] + temp[:4]
__UpperCAmelCase = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
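# Self-contained illustration (not from the original) of the two bit-string
# primitives the cipher leans on, with readable names and a quick sanity check.
def xor_bits_sketch(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

def rotate_left_sketch(bits: str) -> str:
    """Circular left shift by one position, as used in the key schedule."""
    return bits[1:] + bits[0]

assert xor_bits_sketch("1010", "0110") == "1100"
assert rotate_left_sketch("10000") == "00001"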
| 145 | 0 |
import re
def a__ ( _UpperCamelCase : str ):
if len(re.findall('''[ATCG]''' ,_UpperCamelCase ) ) != len(_UpperCamelCase ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' ,'''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
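# Quick self-contained restatement (not from the original) of the complement
# mapping above: str.maketrans builds the table once, translate applies it.
def complement_sketch(dna: str) -> str:
    return dna.translate(str.maketrans("ATCG", "TAGC"))

assert complement_sketch("GATC") == "CTAG"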
| 330 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
a_ = None
try:
import msvcrt
except ImportError:
a_ = None
try:
import fcntl
except ImportError:
a_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a_ = OSError
# Data
# ------------------------------------------------
a_ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
a_ = """3.0.12"""
a_ = None
def a__ ( ):
global _logger
__lowerCamelCase = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = lock_file
return None
def __str__( self ):
'''simple docstring'''
__lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = lock
return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.lock.release()
return None
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase )
# The path to the lock file.
__lowerCamelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowerCamelCase = None
# The default timeout value.
__lowerCamelCase = timeout
# We use this lock primarily for the lock counter.
__lowerCamelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowerCamelCase = 0
return None
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._lock_file
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = float(__UpperCAmelCase )
return None
def lowerCamelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
def lowerCamelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._lock_file_fd is not None
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ):
'''simple docstring'''
# Use the default timeout, if no timeout is provided.
if timeout is None:
__lowerCamelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowerCamelCase = id(self )
__lowerCamelCase = self._lock_file
__lowerCamelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__UpperCAmelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowerCamelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCamelCase ( self , __UpperCAmelCase=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowerCamelCase = id(self )
__lowerCamelCase = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__lowerCamelCase = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.release()
return None
def __del__( self ):
'''simple docstring'''
self.release(force=__UpperCAmelCase )
return None
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = os.path.basename(__UpperCAmelCase )
if len(__UpperCAmelCase ) > max_length and max_length > 0:
__lowerCamelCase = os.path.dirname(__UpperCAmelCase )
__lowerCamelCase = str(hash(__UpperCAmelCase ) )
__lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(__UpperCAmelCase , __UpperCAmelCase )
else:
return path
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )
__lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
except OSError:
pass
else:
try:
msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__UpperCAmelCase )
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self._lock_file_fd
__lowerCamelCase = None
msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 )
os.close(__UpperCAmelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax
super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
try:
fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__UpperCAmelCase )
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__lowerCamelCase = self._lock_file_fd
__lowerCamelCase = None
fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN )
os.close(__UpperCAmelCase )
return None
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
except OSError:
pass
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__lowerCamelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
a_ = None
if msvcrt:
a_ = WindowsFileLock
elif fcntl:
a_ = UnixFileLock
else:
a_ = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
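# Hedged usage sketch for the lock classes above. In the real `filelock`
# package the public name is FileLock; in this obfuscated copy it is bound to
# `a_`. The lock-file path and timeout below are illustrative assumptions.
#
# lock = FileLock("/tmp/resource.lock", timeout=5)
# with lock:        # acquire(); raises Timeout after 5 seconds of waiting
#     with lock:    # safe to nest: only the internal counter is bumped
#         pass      # ... work on the protected resource ...
# # fully released here once the counter drops back to zero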
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 327 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase__ (_UpperCAmelCase):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _snake_case ( nn.Module ):
def __init__( self , a , a) -> Union[str, Any]:
super().__init__()
SCREAMING_SNAKE_CASE = module
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Linear(module.in_features , a , bias=a) , nn.Linear(a , module.out_features , bias=a) , )
SCREAMING_SNAKE_CASE = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=a)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def SCREAMING_SNAKE_CASE__ ( self , a , *a , **a) -> Any:
return self.module(a , *a , **a) + self.adapter(a)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_lowercase : Union[str, Any] = '''bigscience/bloom-1b7'''
# Constant values
_lowercase : str = 2.109_6595_5269_2574
_lowercase : Any = '''Hello my name is'''
_lowercase : Any = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
_lowercase : Union[str, Any] = 10
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(self.model_name)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_abit.config
self.assertTrue(hasattr(a , 'quantization_config'))
SCREAMING_SNAKE_CASE = config.to_dict()
SCREAMING_SNAKE_CASE = config.to_diff_dict()
SCREAMING_SNAKE_CASE = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
SCREAMING_SNAKE_CASE = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
with self.assertRaises(a), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , load_in_abit=a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self) -> int:
with self.assertRaises(a):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(a):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_fpaa.to(torch.floataa)
SCREAMING_SNAKE_CASE = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.to('cpu')
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=a , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> Tuple:
SCREAMING_SNAKE_CASE = 't5-small'
SCREAMING_SNAKE_CASE = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(cls.model_name)
SCREAMING_SNAKE_CASE = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE = None
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
SCREAMING_SNAKE_CASE = modules
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
super().setUp()
# model_name
SCREAMING_SNAKE_CASE = 'bigscience/bloom-560m'
SCREAMING_SNAKE_CASE = 't5-small'
# Different types of model
SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Sequence classification model
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a , device_map='auto')
# CausalLM model
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Seq2seq model
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
SCREAMING_SNAKE_CASE = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
SCREAMING_SNAKE_CASE = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
SCREAMING_SNAKE_CASE = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a)):
SCREAMING_SNAKE_CASE = LoRALayer(module.q_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.k_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
SCREAMING_SNAKE_CASE = self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE = model.forward(**a)
out.logits.norm().backward()
for module in model.modules():
if isinstance(a , a):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(a , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class _snake_case ( A__ ):
_lowercase : str = '''gpt2-xl'''
_lowercase : Union[str, Any] = 3.3191_8548_5415_2187
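# Hedged sketch of the core pattern these tests exercise: loading a causal LM
# quantized to 4 bits through bitsandbytes (the flag appears as `load_in_abit`
# in the obfuscated code above; the public name is `load_in_4bit`). Checkpoint
# and prompt are illustrative assumptions.
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-560m", load_in_4bit=True, device_map="auto"
# )
# inputs = tok("Hello my name is", return_tensors="pt").to(0)
# print(tok.decode(model.generate(**inputs, max_new_tokens=10)[0]))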
| 327 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : int = {'tokenizer_file': 'tokenizer.json'}
lowercase_ : str = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : str = VOCAB_FILES_NAMES
snake_case_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : List[str] = ["input_ids", "attention_mask"]
snake_case_ : List[Any] = None
def __init__( self : Tuple , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Tuple=None , snake_case__ : Tuple="<unk>" , snake_case__ : List[Any]="<s>" , snake_case__ : str="</s>" , snake_case__ : Any="<pad>" , snake_case__ : Optional[Any]=False , snake_case__ : int=False , **snake_case__ : Tuple , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , add_prefix_space=snake_case__ , clean_up_tokenization_spaces=snake_case__ , **snake_case__ , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
_UpperCAmelCase = getattr(snake_case__ , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**snake_case__ )
_UpperCAmelCase = add_prefix_space
def UpperCamelCase ( self : Dict , *snake_case__ : List[str] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[Any] , *snake_case__ : Dict , **snake_case__ : Dict ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
_UpperCAmelCase = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : "Conversation" ):
"""simple docstring"""
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
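# Hedged usage sketch for the fast tokenizer above (public name
# BloomTokenizerFast; bound to an obfuscated class name here). Checkpoint and
# text are illustrative assumptions.
#
# from transformers import BloomTokenizerFast
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# batch = tok(["Hello world"], return_tensors="pt")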
| 133 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowercase_ : str = 'scheduler_config.json'
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : List[str] = 1
snake_case_ : Tuple = 2
snake_case_ : List[Any] = 3
snake_case_ : Union[str, Any] = 4
snake_case_ : Optional[int] = 5
snake_case_ : str = 6
snake_case_ : Any = 7
snake_case_ : List[str] = 8
snake_case_ : Optional[Any] = 9
snake_case_ : Any = 10
snake_case_ : int = 11
snake_case_ : int = 12
snake_case_ : Union[str, Any] = 13
snake_case_ : int = 14
@dataclass
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : torch.FloatTensor
class __lowerCAmelCase :
snake_case_ : List[str] = SCHEDULER_CONFIG_NAME
snake_case_ : Union[str, Any] = []
snake_case_ : str = True
@classmethod
def UpperCamelCase ( cls : List[str] , snake_case__ : Dict[str, Any] = None , snake_case__ : Optional[str] = None , snake_case__ : int=False , **snake_case__ : Tuple , ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=snake_case__ , subfolder=snake_case__ , return_unused_kwargs=snake_case__ , return_commit_hash=snake_case__ , **snake_case__ , )
return cls.from_config(snake_case__ , return_unused_kwargs=snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : Union[str, os.PathLike] , snake_case__ : bool = False , **snake_case__ : List[Any] ):
"""simple docstring"""
self.save_config(save_directory=snake_case__ , push_to_hub=snake_case__ , **snake_case__ )
@property
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls : str ):
"""simple docstring"""
_UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
_UpperCAmelCase = importlib.import_module(__name__.split("." )[0] )
_UpperCAmelCase = [
getattr(snake_case__ , snake_case__ ) for c in compatible_classes_str if hasattr(snake_case__ , snake_case__ )
]
return compatible_classes
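# Hedged sketch of the config round trip this mixin provides: a scheduler
# serializes itself to scheduler_config.json and can be rebuilt from it.
# DDPMScheduler and the directory below are illustrative assumptions.
#
# from diffusers import DDPMScheduler
# sched = DDPMScheduler(num_train_timesteps=1000)
# sched.save_pretrained("./sched")                  # writes scheduler_config.json
# reloaded = DDPMScheduler.from_pretrained("./sched")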
| 133 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class _UpperCAmelCase ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , **__UpperCAmelCase : Any ):
'''simple docstring'''
super().__init__(**a__ )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(a__ )
def __call__( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] = None , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
if "text_queries" in kwargs:
_A = kwargs.pop("text_queries" )
if isinstance(a__ , (str, Image.Image) ):
_A = {"image": image, "candidate_labels": candidate_labels}
else:
_A = image
_A = super().__call__(a__ , **a__ )
return results
def lowerCAmelCase ( self : int , **__UpperCAmelCase : Any ):
'''simple docstring'''
_A = {}
if "threshold" in kwargs:
_A = kwargs["threshold"]
if "top_k" in kwargs:
_A = kwargs["top_k"]
return {}, {}, postprocess_params
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = load_image(inputs["image"] )
_A = inputs["candidate_labels"]
if isinstance(a__ , a__ ):
_A = candidate_labels.split("," )
_A = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(a__ ):
_A = self.tokenizer(a__ , return_tensors=self.framework )
_A = self.image_processor(a__ , return_tensors=self.framework )
yield {
"is_last": i == len(a__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase ( self : Any , __UpperCAmelCase : int ):
'''simple docstring'''
_A = model_inputs.pop("target_size" )
_A = model_inputs.pop("candidate_label" )
_A = model_inputs.pop("is_last" )
_A = self.model(**a__ )
_A = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : List[Any]=None ):
'''simple docstring'''
_A = []
for model_output in model_outputs:
_A = model_output["candidate_label"]
_A = BaseModelOutput(a__ )
_A = self.image_processor.post_process_object_detection(
outputs=a__ , threshold=a__ , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
_A = outputs["scores"][index].item()
_A = self._get_bounding_box(outputs["boxes"][index][0] )
_A = {"score": score, "label": label, "box": box}
results.append(a__ )
_A = sorted(a__ , key=lambda __UpperCAmelCase : x["score"] , reverse=a__ )
if top_k:
_A = results[:top_k]
return results
def lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
_A , _A , _A , _A = box.int().tolist()
_A = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
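# Hedged usage sketch matching the documented zero-shot-object-detection task
# this class implements. Checkpoint, image URL and labels are illustrative
# assumptions.
#
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]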
| 354 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
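# Worked example (not from the original) of the relation the function above
# solves: for a series R-X circuit, impedance**2 = resistance**2 + reactance**2,
# so each quantity follows from the other two. Uses the sqrt imported above.
def impedance_sketch(resistance: float, reactance: float) -> float:
    return sqrt(resistance**2 + reactance**2)

assert impedance_sketch(3.0, 4.0) == 5.0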
| 174 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : int = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowercase__ ( UpperCAmelCase__ ):
_UpperCAmelCase :Any = 'speech_to_text'
_UpperCAmelCase :Union[str, Any] = ['past_key_values']
_UpperCAmelCase :List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Union[str, Any] , snake_case__ : Tuple=1_0000 , snake_case__ : Union[str, Any]=12 , snake_case__ : Dict=2048 , snake_case__ : List[Any]=4 , snake_case__ : Union[str, Any]=6 , snake_case__ : Any=2048 , snake_case__ : Optional[int]=4 , snake_case__ : Any=0.0 , snake_case__ : str=0.0 , snake_case__ : Optional[int]=True , snake_case__ : int=True , snake_case__ : Dict="relu" , snake_case__ : Optional[Any]=256 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.02 , snake_case__ : Any=2 , snake_case__ : List[Any]=True , snake_case__ : Tuple=1 , snake_case__ : Union[str, Any]=0 , snake_case__ : int=2 , snake_case__ : Dict=6000 , snake_case__ : int=1024 , snake_case__ : str=2 , snake_case__ : List[str]=(5, 5) , snake_case__ : Optional[int]=1024 , snake_case__ : Tuple=80 , snake_case__ : str=1 , **snake_case__ : int , ):
lowerCamelCase_ : Any =vocab_size
lowerCamelCase_ : Any =d_model
lowerCamelCase_ : Dict =encoder_ffn_dim
lowerCamelCase_ : Optional[Any] =encoder_layers
lowerCamelCase_ : Dict =encoder_attention_heads
lowerCamelCase_ : Optional[Any] =decoder_ffn_dim
lowerCamelCase_ : Optional[Any] =decoder_layers
lowerCamelCase_ : Optional[int] =decoder_attention_heads
lowerCamelCase_ : List[Any] =dropout
lowerCamelCase_ : Optional[Any] =attention_dropout
lowerCamelCase_ : Optional[int] =activation_dropout
lowerCamelCase_ : Optional[int] =activation_function
lowerCamelCase_ : int =init_std
lowerCamelCase_ : Any =encoder_layerdrop
lowerCamelCase_ : List[Any] =decoder_layerdrop
lowerCamelCase_ : Any =use_cache
lowerCamelCase_ : int =encoder_layers
lowerCamelCase_ : str =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ : str =max_source_positions
lowerCamelCase_ : str =max_target_positions
lowerCamelCase_ : Tuple =num_conv_layers
lowerCamelCase_ : Optional[Any] =list(snake_case__ )
lowerCamelCase_ : Dict =conv_channels
lowerCamelCase_ : List[str] =input_feat_per_channel
lowerCamelCase_ : Tuple =input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
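# Hedged sketch: the attribute_map above aliases common names onto the config's
# own fields, so hidden_size resolves to d_model. The public class name is
# Speech2TextConfig; the usage below is an illustrative assumption.
#
# from transformers import Speech2TextConfig
# cfg = Speech2TextConfig()
# assert cfg.hidden_size == cfg.d_model
# assert cfg.num_attention_heads == cfg.encoder_attention_heads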
| 144 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_UpperCAmelCase = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
_UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =cn.convert_to_negative(lowercase )
    # the negative image array should contain at least one truthy value
assert negative_img.any()
def __magic_name__ ( ):
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # compare the string form, since PIL Image objects do not define a simple equality
assert str(cc.change_contrast(lowercase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Dict =canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # every entry of the generated Gaussian kernel should be non-zero
assert resp.all()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[int] =imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # every pixel of the loaded grayscale image should be non-zero
assert canny_img.all()
SCREAMING_SNAKE_CASE_: List[Any] =canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def __magic_name__ ( ):
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def __magic_name__ ( ):
    # Laplacian kernel including the diagonal neighbours
SCREAMING_SNAKE_CASE_: str =array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
SCREAMING_SNAKE_CASE_: Tuple =conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def __magic_name__ ( ):
assert med.median_filter(lowercase , 3 ).any()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =sp.make_sepia(lowercase , 20 )
assert sepia.all()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" ):
SCREAMING_SNAKE_CASE_: Dict =bs.Burkes(imread(lowercase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" , ):
SCREAMING_SNAKE_CASE_: int =rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: str ="""digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE_: Tuple =imread(lowercase , 0 )
    # get_neighbors_pixel() should return a non-None neighbourhood
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: Any =0
SCREAMING_SNAKE_CASE_: List[Any] =image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE_: Optional[Any] =lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
SCREAMING_SNAKE_CASE_: Dict =np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
SCREAMING_SNAKE_CASE_: List[str] =lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
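# A compact, self-contained sketch of the local-binary-pattern idea the tests
# above exercise (the neighbour ordering and helper name are illustrative,
# not the library's exact API):
def _lbp_value_sketch(image, x, y):
    """Compare the 8 neighbours of (x, y) with the centre pixel and pack the
    comparison bits into a single byte."""
    center = image[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    bits = [int(image[x + dx][y + dy] >= center) for dx, dy in offsets]
    return sum(bit << i for i, bit in enumerate(bits))


assert _lbp_value_sketch(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 1, 1) == 120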
| 173 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = StableDiffusionSAGPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
torch.manual_seed(0 )
lowercase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ : Dict = CLIPTextModel(lowercase_ )
lowercase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str]=0 ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : Tuple = torch.manual_seed(lowercase_ )
else:
lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : str = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
lowercase_ : Optional[int] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : str = """."""
lowercase_ : List[Any] = torch.manual_seed(0 )
lowercase_ : int = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : Dict = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : List[str] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : Tuple = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : str = """."""
lowercase_ : Any = torch.manual_seed(0 )
lowercase_ : Optional[int] = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : str = output.images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : Union[str, Any] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[Any] = """."""
lowercase_ : List[str] = torch.manual_seed(0 )
lowercase_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
lowercase_ : int = output.images
assert image.shape == (1, 512, 768, 3)
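# A hedged end-user sketch of the pipeline exercised above; the model id
# matches the first slow test, and GPU/network availability are assumptions.
if __name__ == "__main__":
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    image = pipe(
        "a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5, num_inference_steps=20
    ).images[0]
    image.save("astronaut_sag.png")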
| 21 | '''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """decord""" )
self.check_model_type(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ):
lowercase_ : Union[str, Any] = {}
if frame_sampling_rate is not None:
lowercase_ : Any = frame_sampling_rate
if num_frames is not None:
lowercase_ : Optional[Any] = num_frames
lowercase_ : Union[str, Any] = {}
if top_k is not None:
lowercase_ : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ):
if num_frames is None:
lowercase_ : List[Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content )
lowercase_ : Optional[Any] = VideoReader(lowercase_ )
videoreader.seek(0 )
lowercase_ : Tuple = 0
lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1
lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa )
lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy()
lowercase_ : Union[str, Any] = list(lowercase_ )
lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ):
lowercase_ : int = self.model(**lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ):
if top_k > self.model.config.num_labels:
lowercase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowercase_ : str = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ : Union[str, Any] = scores.tolist()
lowercase_ : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 21 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__a :Any = logging.getLogger()
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
    results = {}
    path = os.path.join(__UpperCamelCase , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
return results
__a :Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
import xla_spawn
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
A_ = time()
xla_spawn.main()
A_ = time()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def __A ( self : Optional[int] ):
import xla_spawn
A_ = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
xla_spawn.main() | 312 |
import functools
from typing import Any
def __snake_case ( string : str , words : list[str] ):
    """simple docstring"""
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be a non-empty string" )

    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string )

    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True

        return False

    return is_breakable(0 )
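# A quick usage sketch of the word-break check above:
assert __snake_case("applepenapple", ["apple", "pen"])
assert not __snake_case("catsandog", ["cats", "dog", "sand", "and", "cat"])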
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = eval_examples
_snake_case = post_process_function
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_ = "eval" , **lowerCAmelCase_ , ) -> Dict[str, float]:
_snake_case = gen_kwargs.copy()
_snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
_snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
_snake_case = gen_kwargs
_snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
_snake_case = self.get_eval_dataloader(lowerCAmelCase_ )
_snake_case = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
_snake_case = self.compute_metrics
_snake_case = None
_snake_case = time.time()
_snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case = eval_loop(
lowerCAmelCase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_snake_case = compute_metrics
_snake_case = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
_snake_case = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_snake_case = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
else:
_snake_case = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(lowerCAmelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
return metrics
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_ = "test" , **lowerCAmelCase_ ) -> str:
_snake_case = gen_kwargs.copy()
_snake_case = self.get_test_dataloader(lowerCAmelCase_ )
        # Temporarily disable metric computation; we will do it in the loop here.
_snake_case = self.compute_metrics
_snake_case = None
_snake_case = time.time()
_snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case = eval_loop(
lowerCAmelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_snake_case = compute_metrics
_snake_case = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_snake_case = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 'predict' )
_snake_case = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_snake_case = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )
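# A hedged sketch of driving the overrides above, assuming they keep their
# upstream names `evaluate` and `predict` (the model, args and datasets are
# placeholders that must exist elsewhere):
#
#   trainer = UpperCamelCase_(model=model, args=training_args, train_dataset=train_ds,
#                             eval_dataset=eval_ds, eval_examples=eval_examples,
#                             post_process_function=post_fn, compute_metrics=metric_fn)
#   metrics = trainer.evaluate(max_length=128, num_beams=4)
#   predictions = trainer.predict(test_ds, test_examples, metric_key_prefix="test")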
| 295 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
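# A hedged sketch of a concrete test case built on the mixin above, using the
# attribute names the mixin itself reads (`block_class`, `block_type`); the
# import path is the upstream diffusers location and is an assumption here:
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlock2DTests(UpperCamelCase_, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"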
| 295 | 1 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ):
    """Reset the deprecation-warning registry so every test sees the warning again."""
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )


@pytest.fixture
def mock_hfh(monkeypatch ):
    """Replace the Hub client with a stub exposing a fixed list of metrics."""

    class MetricMock:
        def __init__(self , metric_id ):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self ):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )


@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each metric entry point should emit the deprecation warning pointing at evaluate."""
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 266 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_0_0_0_0_0_0 , n_limit: int = 1_0 ) -> int:
    """
    Count the tile totals t <= t_limit that can form a hollow square lamina
    in between 1 and n_limit distinct ways (Project Euler problem 174).
    """
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # the hole width must share the parity of the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F'''{solution() = }''')
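# A hedged brute-force cross-check for small limits (quadratic in the outer
# width, so only suitable for tiny values of t_limit):
def _solution_brute(t_limit: int = 1_0_0 , n_limit: int = 1_0 ) -> int:
    counts = defaultdict(int )
    outer = 3
    while 4 * outer - 4 <= t_limit:  # the thinnest lamina of width w uses 4*w - 4 tiles
        for hole in range(outer - 2 , 0 , -2 ):
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                counts[tiles] += 1
        outer += 1
    return sum(1 for v in counts.values() if 1 <= v <= n_limit )


assert _solution_brute(1_0_0 , 1_0 ) == solution(1_0_0 , 1_0 )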
| 266 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = RoCBertTokenizer
lowerCamelCase_ = None
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = filter_non_english
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
for i, value in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__lowerCamelCase , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(__lowerCamelCase , "do_lower_case" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["的", "人", "有"]
_SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__lowerCamelCase )
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_SCREAMING_SNAKE_CASE = "你好,你是谁"
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
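# A hedged usage sketch for the tokenizer class under test; the checkpoint id
# below is the upstream RoCBert release, and network access is assumed:
if __name__ == "__main__":
    tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
    print(tok.tokenize("你好,你是谁"))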
| 111 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = '''efficientnet'''
def __init__( self : Optional[Any] , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 6_0_0 , __lowerCamelCase : float = 2.0 , __lowerCamelCase : float = 3.1 , __lowerCamelCase : int = 8 , __lowerCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCamelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __lowerCamelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __lowerCamelCase : List[int] = [] , __lowerCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCamelCase : float = 0.2_5 , __lowerCamelCase : str = "swish" , __lowerCamelCase : int = 2_5_6_0 , __lowerCamelCase : str = "mean" , __lowerCamelCase : float = 0.0_2 , __lowerCamelCase : float = 0.0_0_1 , __lowerCamelCase : float = 0.9_9 , __lowerCamelCase : float = 0.5 , __lowerCamelCase : float = 0.2 , **__lowerCamelCase : Tuple , ):
"""simple docstring"""
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = width_coefficient
_SCREAMING_SNAKE_CASE = depth_coefficient
_SCREAMING_SNAKE_CASE = depth_divisor
_SCREAMING_SNAKE_CASE = kernel_sizes
_SCREAMING_SNAKE_CASE = in_channels
_SCREAMING_SNAKE_CASE = out_channels
_SCREAMING_SNAKE_CASE = depthwise_padding
_SCREAMING_SNAKE_CASE = strides
_SCREAMING_SNAKE_CASE = num_block_repeats
_SCREAMING_SNAKE_CASE = expand_ratios
_SCREAMING_SNAKE_CASE = squeeze_expansion_ratio
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dim
_SCREAMING_SNAKE_CASE = pooling_type
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = batch_norm_eps
_SCREAMING_SNAKE_CASE = batch_norm_momentum
_SCREAMING_SNAKE_CASE = dropout_rate
_SCREAMING_SNAKE_CASE = drop_connect_rate
_SCREAMING_SNAKE_CASE = sum(__lowerCamelCase ) * 4
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return 1e-5
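# A hedged instantiation sketch using the upstream class this file mirrors
# (transformers.EfficientNetConfig); the defaults above describe EfficientNet-B7.
if __name__ == "__main__":
    from transformers import EfficientNetConfig

    cfg = EfficientNetConfig(image_size=600, width_coefficient=2.0, depth_coefficient=3.1)
    print(cfg.hidden_dim)  # 2560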
| 111 | 1 |