from __future__ import annotations
import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices using Strassen's
    seven-product scheme."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of any compatible dimensions, padding them with
    zeros up to the next power of two so that ``actual_strassen`` applies."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Work on copies so the caller's matrices are not mutated by the padding.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))


def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
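

# Illustrative self-check (an addition, not part of the original file); the
# sample strings are assumptions chosen to exercise both outcomes.
assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")
assert is_balanced("")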


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()


from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property (Project Euler 43) of a
    0-9 pandigital number given as a tuple of digits."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring
    divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")


def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a system of two linear equations, each given as [a, b, c] for
    a*x + b*y = c, using Cramer's rule."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
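

# Illustrative usage (an addition; the coefficients are assumptions). This
# solves x + 2y = 3 and 2x + y = 3, whose unique solution is x = y = 1.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)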
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A_ = True
except ImportError:
A_ = False
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase ( A ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase__ ( UpperCamelCase_: ArgumentParser ):
UpperCamelCase_ =parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=__UpperCamelCase , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=__UpperCamelCase , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=__UpperCamelCase )
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: str , UpperCamelCase_: Optional[int]=None , *UpperCamelCase_: Optional[Any] ):
UpperCamelCase_ =testing
UpperCamelCase_ =testing_file
UpperCamelCase_ =path
def UpperCamelCase__ ( self: List[str] ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase_ =[directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(__UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCamelCase_ =(
Path(__UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase_ =path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__UpperCamelCase ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCamelCase_ =json.load(__UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__UpperCamelCase , extra_context=__UpperCamelCase , )
UpperCamelCase_ =[directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCamelCase_ =json.load(__UpperCamelCase )
UpperCamelCase_ =configuration["lowercase_modelname"]
UpperCamelCase_ =configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase_ ="PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCamelCase_ ="TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCamelCase_ ="Flax" in generate_tensorflow_pytorch_and_flax
UpperCamelCase_ =f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(UpperCamelCase_: List[str] ):
with open(__UpperCamelCase , "r" ) as f:
UpperCamelCase_ =f.readlines()
with open(__UpperCamelCase , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: List[str] ):
# Create temp file
UpperCamelCase_ , UpperCamelCase_ =mkstemp()
UpperCamelCase_ =False
with fdopen(__UpperCamelCase , "w" ) as new_file:
with open(__UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(__UpperCamelCase )
if line_to_copy_below in line:
UpperCamelCase_ =True
for line_to_copy in lines_to_copy:
new_file.write(__UpperCamelCase )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__UpperCamelCase , __UpperCamelCase )
# Remove original file
remove(__UpperCamelCase )
# Move new file
move(__UpperCamelCase , __UpperCamelCase )
def skip_units(UpperCamelCase_: Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase_: Union[str, Any] ):
with open(__UpperCamelCase ) as datafile:
UpperCamelCase_ =[]
UpperCamelCase_ =False
UpperCamelCase_ =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase_ =line.split("\"" )[1]
UpperCamelCase_ =skip_units(__UpperCamelCase )
elif "# Below: " in line and "##" not in line:
UpperCamelCase_ =line.split("\"" )[1]
UpperCamelCase_ =skip_units(__UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ =[]
elif "# Replace with" in line and "##" not in line:
UpperCamelCase_ =[]
elif "##" not in line:
lines_to_copy.append(__UpperCamelCase )
remove(__UpperCamelCase )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__UpperCamelCase )
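

# Illustrative CLI invocations (an addition; the command name comes from the
# deprecation warning above, the testing file name is an assumption):
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file my_config.json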


import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)


from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))


import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass


import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for the GPT-SW3 models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the upstream source lists a dozen distinct Unicode space characters here;
        # the extraction flattened them, so verify this set against the original file.
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Preprocess text the same way it was done when training the tokenizer."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text with preprocessing plus the raw SP processor, bypassing the slower HF wrapper."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with the raw SP processor."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
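

# Illustrative usage (an addition; the checkpoint id is taken from the vocab
# map above, the sample text is an assumption):
#
#   from transformers import GPTSw3Tokenizer
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Svenska är kul!")["input_ids"]
#   text = tokenizer.decode(ids)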
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
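
# Illustrative usage of the re-exported pipeline (an addition; the checkpoint
# id and the text_to_image call are assumptions based on the publicly released
# Versatile Diffusion weights and pipeline API):
#
#   from diffusers import VersatileDiffusionPipeline
#   pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#   image = pipe.text_to_image("an astronaut riding a horse").images[0]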


import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the dataset and save results to save_dir/rank_{local_rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
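
# Illustrative launch command (an addition; the flags mirror the parser above,
# the process count and paths are assumptions):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm --save_dir /tmp/gen --bs 8 --fp16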


import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Only build a fresh scheduler when the caller did not pass one in.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_checkpoints(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps


from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children for the population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
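    # Quick check (illustrative): the shortest 1 -> 4 path is 1 -> 3 -> 4
    # (5 + 6 = 11) and the shortest 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16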
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in place using binary insertion sort."""
    for i in range(1, len(collection)):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the position where `val` belongs among the sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the larger elements one slot to the right and insert `val`.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
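    # Quick check (illustrative): the binary search finds the insertion point in
    # O(log i) comparisons, and equal elements keep their order (the sort is stable).
    assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]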
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
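# For reference, `utils/documentation_tests.txt` is expected to hold one
# repo-relative path per line, sorted alphabetically, e.g. (illustrative):
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py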
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
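    # The News API response consumed above has (illustratively) the shape
    #   {"articles": [{"title": "..."}, ...], ...}
    # so each printed line is the 1-based index followed by the headline.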
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # Swap a random pivot into the last position before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
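# Sanity check (illustrative): the partitioning sorts M in place, so it must now
# be non-decreasing; the comparison count z varies between runs because the
# pivot index is drawn at random.
assert np.all(M[:-1] <= M[1:])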
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_truncate_or_pad_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_truncate_or_pad_fits_exactly(self):
        """Do nothing if the sequence is exactly the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_truncate_or_pad_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no `@highlight` yields an empty list of summary lines."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for the one quantity (voltage, current or power) that is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
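    # Examples (illustrative): exactly one argument is 0 and gets solved for, e.g.
    #   electric_power(voltage=2, current=4, power=0) -> result(name='power', value=8.0)
    #   electric_power(voltage=0, current=2, power=4) -> result(name='voltage', value=2.0)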
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary representation as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
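    # Worked examples (illustrative): 0xAC = 172 = 0b10101100, and a leading
    # minus sign is carried through to the result.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-afB") == -101011111011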
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
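    # Example invocation (illustrative; the script name and run id are made up):
    #   python get_github_job_time.py --workflow_run_id 123456789
    # which prints every job name with its duration in minutes, longest first.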
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
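# Sanity check of the schedule above (illustrative): at t=0, sigma = sin(0)^2 = 0
# and alpha = 1, so alpha_sigma_to_t(1, 0) = atan2(0, 1) / pi * 2 = 0; at t=1,
# sigma = 1 and alpha = 0, giving atan2(1, 0) / pi * 2 = 1. The crash schedule
# therefore maps [0, 1] onto [0, 1] while reshaping the noise/signal balance.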
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =XLMModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase , lengths=__UpperCamelCase , langs=__UpperCamelCase )
UpperCamelCase_ =model(__UpperCamelCase , langs=__UpperCamelCase )
UpperCamelCase_ =model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =XLMWithLMHeadModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =XLMForQuestionAnsweringSimple(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase )
UpperCamelCase_ =model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )
UpperCamelCase_ =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =XLMForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase )
UpperCamelCase_ =model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , p_mask=__UpperCamelCase , )
UpperCamelCase_ =model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )

        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =XLMForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase )
UpperCamelCase_ =model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =self.num_labels
UpperCamelCase_ =XLMForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
UpperCamelCase_ =self.num_choices
UpperCamelCase_ =XLMForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ =model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(
[isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_attentions in attentions] , [True] * len(__UpperCamelCase ) )
self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__UpperCamelCase ):
# adds PAD dummy token
UpperCamelCase_ =min_length + idx + 1
UpperCamelCase_ =min_length + idx + 1
UpperCamelCase_ =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCamelCase ) )
    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(
[isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCamelCase ) , )
self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__UpperCamelCase ):
# adds PAD dummy token
UpperCamelCase_ =min_length + idx + 1
UpperCamelCase_ =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCamelCase ) , )
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
    def test_image_processor_from_pretrained_subfolder(self):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ["torch"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Union[str, Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : List[str] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
def __a ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(_lowerCAmelCase , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : str = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : str = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : List[str] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : List[str] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Any = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Tuple = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Union[str, Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Any = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Tuple = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Union[str, Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : str = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Union[str, Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : str = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : List[str] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : List[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Union[str, Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : int = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[Any] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Dict = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Any = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Tuple = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
class __UpperCamelCase ( metaclass=_lowercase ):
"""simple docstring"""
_lowercase : Optional[int] = ["""torch"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ['''torch'''] )
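# The stub classes above follow the transformers "dummy objects" pattern: when
# torch is not installed, each real class is replaced by a placeholder whose
# constructor and from_config/from_pretrained classmethods call
# requires_backends, which raises an ImportError naming the missing backend.
# A minimal sketch of the mechanism (illustrative names, not the library's
# exact code):
#
#   class DummyObject(type):
#       # Metaclass so that attribute access on the class itself also
#       # raises the missing-backend error.
#       def __getattribute__(cls, key):
#           if key.startswith("_"):
#               return super().__getattribute__(key)
#           requires_backends(cls, cls._backends)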
| 194 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    # Apply `fun` to `obj`, returning either (result, None) or (None, exception).
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
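# Each helper above returns an "operation tuple" that _run_operation can
# replay against both a HashMap and a plain dict, e.g.
#   _set("key_a", "val_a") == (setitem, "key_a", "val_a")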
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 684 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """Configuration class for EfficientNet models."""

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
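# Added usage sketch (the values are the defaults above, shown only for
# illustration; this snippet is not part of the original module):
#
#   config = EfficientNetConfig(image_size=600, hidden_dim=2560)
#   config.num_hidden_layers  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64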
| 395 |
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix collection[0:i] for val's slot.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift everything from the slot onwards one step right, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
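# Quick self-check of the sort (an added example; the input lists are
# arbitrary assumptions, not part of the original module):
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []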
| 684 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decoded_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
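# Note on the batch_decode test above: the three random logit tensors stand in
# for MGP-STR's character (38-symbol), BPE (50257) and wordpiece (30522)
# vocabulary heads over a 27-token sequence.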
| 497 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
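# Added worked example of one rolling-hash step (a sketch with a length-2
# window, so modulus_power here is alphabet_size ** 1 % modulus):
#   sliding "ab" -> "bc" inside "abc":
#   h("bc") = ((h("ab") - ord("a") * modulus_power) * alphabet_size
#              + ord("c")) % modulus
# i.e. remove the leading character's contribution, shift, append the new one.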
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 684 | 0 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'

_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'

_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 321 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
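# Note on the integration test above: WhisperFeatureExtractor produces
# (1, 80, 3000) log-mel features, i.e. 80 mel bins over 3000 frames
# (30 seconds of audio at a 10 ms hop, 160 samples at 16 kHz).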
| 684 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of the given total length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count all reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
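# Sanity check from the Project Euler 145 statement (there are 120 reversible
# numbers below one thousand, i.e. with at most three digits):
assert solution(3) == 120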
if __name__ == "__main__":
print(F'''{solution() = }''')
| 69 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
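# Added usage sketch (the agent name is an arbitrary assumption): passing
# None falls back to the default dataset repo, while a string containing
# whitespace is treated as the prompt itself and returned unchanged.
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")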
| 684 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
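# Added usage sketch (the repo info object and file name are arbitrary
# assumptions; this snippet is not part of the original module):
#
#   fs = HfFileSystem(repo_info=some_dataset_info, token=None)
#   fs.ls("")                            # top-level files/directories
#   with fs.open("data/train.csv") as f: # streams via hf_hub_url
#       head = f.read(100)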
| 354 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property of Project Euler 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
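# Sanity note (from the published Project Euler 43 answer, not computed here):
#   solution() == 16695334890
# Checking it requires iterating over all 10! digit permutations, which takes
# a few seconds, so it is left as a comment rather than a module-level assert.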
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( equation1 , equation2 ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
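# Quick check (example added for illustration): solving
#   x + 2y = 3   ->  (1, 2, 3)
#   2x + y = 3   ->  (2, 1, 3)
# gives determinant = 1*1 - 2*2 = -3, determinant_x = determinant_y = -3, so:
#
#   assert _lowerCAmelCase((1, 2, 3), (2, 1, 3)) == (1.0, 1.0)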
| 92 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = "  ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
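# A minimal usage sketch (hedged: downloads the checkpoint on first use, and
# assumes the class above is loaded as the Blenderbot fast tokenizer):
#
#   tokenizer = __SCREAMING_SNAKE_CASE.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   print(tokenizer.decode(ids))  # ends with the </s> eos token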
| 684 | 0 |
def cocktail_shaker_sort( unsorted ) -> list:
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1] , unsorted[j] = unsorted[j] , unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1] , unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
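# Example (added for illustration): each outer iteration bubbles the largest
# remaining value right and the smallest left, so
# cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5].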
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Any = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 257 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ) -> Optional[Any]:
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ) -> str:
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ) -> Dict:
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ) -> Dict:
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
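# Example invocation (hedged: all paths and the base model name are
# placeholders, not from the original file):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model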
| 684 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ) -> List[str]:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop( accelerator , model , eval_dataloader , metric ) -> List[Any]:
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function( config , args ) -> None:
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('''glue''' , '''mrpc''' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('''epoch_''' )[1]
        state_epoch_num = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('''resumed checkpoint performance:''' , accuracy )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = F"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state['''accuracy'''] = accuracy
        state['''lr'''] = lr_scheduler.get_lr()[0]
        state['''optimizer_lr'''] = optimizer.param_groups[0]['''lr''']
        state['''epoch'''] = epoch
        state['''overall_step'''] = overall_step
        accelerator.print(F"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
                json.dump(state , f )
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--partial_train_epoch''' , type=int , default=None , help='''If passed, the training will stop after this number of epochs.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=2 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
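# Example launch (hedged: assumes `accelerate config` / a DeepSpeed config has
# been set up separately, and the script filename is a placeholder):
#
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./checkpoints
#   accelerate launch this_script.py --resume_from_checkpoint ./checkpoints/epoch_0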
if __name__ == "__main__":
    main()
| 217 |
def is_balanced( s ) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
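# Examples (added for illustration):
#
#   assert is_balanced("([]{})")       # every opener is closed in order
#   assert not is_balanced("[(])")     # "]" tries to close "(" -> mismatch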
def main() -> None:
    s = input("Enter sequence of brackets: " )
    if is_balanced(s ):
        print(s , "is balanced" )
    else:
        print(s , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 0 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping( fname , overwrite = False ) -> Optional[bool]:
    with open(fname , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    lines = content.split("""\n""" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(""" """ * indent + """(""" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(""" """ * indent + """)""" ):
                        line_idx += 1
                    blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , """w""" , encoding="""utf-8""" ) as f:
            f.write("""\n""".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite = False ) -> None:
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(""".py""" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            """ this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
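# Example (hedged: the script path is a placeholder; run from the repo root):
#
#   python utils/sort_auto_mappings.py --check_only   # fail if mappings are unsorted
#   python utils/sort_auto_mappings.py                # rewrite the files in place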
| 477 |
def __lowerCamelCase ( equation1 , equation2 ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError("Please enter a valid equation." )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero." )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)" )
        else:
            raise ValueError("No solution. (Inconsistent system)" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 684 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def convert_command_factory( args ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowerCAmelCase ( BaseDatasetsCLICommand ):
'''simple docstring'''
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=str , required=True , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=str , required=True , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path: str , datasets_directory: str , *args ):
        self._logger = get_logger("datasets-cli/converting" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(input_file , encoding="utf-8" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + "\n" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    out_line = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py" , "" )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , "w" , encoding="utf-8" ) as f:
                f.writelines(out_lines )
            self._logger.info(f"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"""You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.""" )
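# Example (hedged: the dataset folder is a placeholder path):
#
#   datasets-cli convert --tfds_path ./tfds/my_dataset/ --datasets_directory ./converted/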
| 391 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("Building PyTorch model from configuration: {}".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
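# Example invocation (hedged: all paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin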
| 684 | 0 |
def solution():
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
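# Sanity note (added for illustration): the Champernowne constant starts
# 123456789101112..., so constant[0] == "1" and constant[9] == "1" (the first
# digit of 10); the product d1*d10*d100*...*d1000000 computed above is the
# Project Euler 40 answer, 210.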
if __name__ == "__main__":
print(solution())
| 194 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test( self , vqa_pipeline , examples ):
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{"score": ANY(float ), "answer": ANY(str )}],
                [{"score": ANY(float ), "answer": ANY(str )}],
            ] , )
    @require_torch
    def test_small_model_pt( self ):
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question="How many cats are there?" , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
    @slow
    @require_torch
    def test_large_model_pt( self ):
        vqa_pipeline = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf( self ):
        pass
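# A minimal usage sketch outside the test harness (hedged: downloads the model
# on first use; the image path is a placeholder):
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)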
| 684 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'''jukebox''': 512,
}
class lowerCAmelCase_ ( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) ->None:
        """simple docstring"""
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding="utf-8" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding="utf-8" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding="utf-8" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(r"\-'" , r"\-+'" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ) ->int:
        """simple docstring"""
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ) ->dict:
        """simple docstring"""
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ) ->Optional[Any]:
        """simple docstring"""
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ) ->Tuple:
        """simple docstring"""
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ) ->int:
        """simple docstring"""
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists: str , genres: str , lyrics: str , is_split_into_words: bool = False ) ->Dict:
        """simple docstring"""
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + ".v2"
                genres[idx] = [
                    self._normalize(genre ) + ".v2" for genre in genres[idx].split("_" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace("\\" , "\n" )
        lyrics = self.out_of_vocab.sub("" , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ) ->Any:
        """simple docstring"""
        text = unicodedata.normalize("NFD" , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text: str ) ->str:
        """simple docstring"""
        accepted = (
            [chr(i ) for i in range(ord("a" ) , ord("z" ) + 1 )]
            + [chr(i ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
            + [chr(i ) for i in range(ord("0" ) , ord("9" ) + 1 )]
            + ["."]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"_+" )
        text = "".join([c if c in accepted else "_" for c in text.lower()] )
        text = pattern.sub("_" , text ).strip("_" )
        return text
    def convert_lyric_tokens_to_string( self , lyrics ) ->str:
        """simple docstring"""
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type: Optional[Union[str, TensorType]] = None , prepend_batch_axis: bool = False ) ->Dict:
        """simple docstring"""
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ) ->BatchEncoding:
        """simple docstring"""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
        with open(artists_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
        with open(genres_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
        with open(lyrics_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ) ->Tuple:
        """simple docstring"""
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
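# A minimal usage sketch (hedged: "ArthurZ/jukebox" mirrors the vocab files
# referenced in the maps above and must be reachable):
#
#   tok = lowerCAmelCase_.from_pretrained("ArthurZ/jukebox")
#   enc = tok("Alan Jackson", "Country Rock", lyrics="old town road")
#   print(enc["input_ids"][0].shape)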
| 395 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
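# A minimal usage sketch (hedged: requires the transformers>=4.25.0 extra
# checked above and downloads the "shi-labs/versatile-diffusion" checkpoint):
#
#   from diffusers import VersatileDiffusionPipeline
#
#   pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#   image = pipe.text_to_image("a red panda reading a book").images[0]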
| 684 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , num_labels=3 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DeiTModel,
            """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def snake_case ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_lowercase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_lowercase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_lowercase = model(**__UpperCamelCase ).loss
loss.backward()
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowercase = False
_lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_lowercase = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
_lowercase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_lowercase = model(**__UpperCamelCase ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
_lowercase = problem_type["title"]
_lowercase = problem_type["num_labels"]
_lowercase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_lowercase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
_lowercase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
_lowercase = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
_lowercase = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case ( self : List[str] ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = DeiTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ) -> Dict:
_lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__UpperCamelCase )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase = model(**__UpperCamelCase )
# verify the logits
_lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_lowercase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__UpperCamelCase , return_tensors="pt" )
_lowercase = inputs.pixel_values.to(__UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowercase = model(__UpperCamelCase )
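# A minimal sketch of the inference pattern the integration tests above exercise; it
# assumes network access to fetch the checkpoint and that the COCO fixture path used by
# prepare_img above exists. Not part of the test suite.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    print(model.config.id2label[int(logits.argmax(-1))])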
| 497 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
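# A minimal sketch of the sampling loop these tests drive: a UniPCMultistepScheduler
# configured as in get_scheduler_config, stepped over 10 inference timesteps. The
# 0.1 * sample "model output" is a stand-in for a real denoiser, as in the tests above.
if __name__ == "__main__":
    import torch
    from diffusers import UniPCMultistepScheduler

    scheduler = UniPCMultistepScheduler(
        num_train_timesteps=1_000, beta_schedule="linear", solver_order=2, solver_type="bh2"
    )
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = 0.1 * sample  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)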
| 684 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
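# A short sketch of how callers observe this shim: constructing the deprecated class
# emits a FutureWarning that can be intercepted with the warnings module. apply_ocr=False
# avoids the optional OCR dependency of the underlying image processor.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LayoutLMv2FeatureExtractor(apply_ocr=False)
    assert any(issubclass(w.category, FutureWarning) for w in caught)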
| 321 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    # print the example queries so the demo produces visible output
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 684 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , a_ : Tuple , a_ : Optional[Any] , a_ : Union[str, Any]="replace" , a_ : Union[str, Any]="<s>" , a_ : int="</s>" , a_ : Optional[int]="</s>" , a_ : str="<s>" , a_ : Tuple="<unk>" , a_ : int="<pad>" , a_ : Union[str, Any]="<mask>" , a_ : List[Any]=False , **a_ : Optional[Any] , ):
"""simple docstring"""
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
__snake_case = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
__snake_case = json.load(__UpperCamelCase )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = errors # how to handle errors in decoding
__snake_case = bytes_to_unicode()
__snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
__snake_case = merges_handle.read().split("\n" )[1:-1]
__snake_case = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__snake_case = {}
__snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def A ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def A ( self : str ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def A ( self : Optional[int] , a_ : str ):
"""simple docstring"""
__snake_case = []
for token in re.findall(self.pat , __UpperCamelCase ):
__snake_case = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(" " ) )
return bpe_tokens
def A ( self : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def A ( self : List[str] , a_ : Optional[Any] ):
"""simple docstring"""
return self.decoder.get(__UpperCamelCase )
def A ( self : List[Any] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = "".join(__UpperCamelCase )
__snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def A ( self : Optional[Any] , a_ : str , a_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__snake_case = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + "\n" )
__snake_case = 0
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__snake_case = token_index
writer.write(" ".join(__UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def A ( self : Dict , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : Union[str, Any] , a_ : List[Any] , a_ : str=False , **a_ : Optional[int] ):
"""simple docstring"""
__snake_case = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
__snake_case = " " + text
return (text, kwargs)
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def A ( self : List[Any] , a_ : "Conversation" ):
"""simple docstring"""
__snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
__snake_case = " ".join(__UpperCamelCase )
__snake_case = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
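# A small demonstration of the two helpers above: bytes_to_unicode maps printable byte
# values to themselves, and get_pairs enumerates the adjacent symbol pairs that drive
# the BPE merge loop. The tuple stands in for a word split into symbols.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    assert byte_encoder[ord("A")] == "A"  # printable bytes map to themselves
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}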
| 69 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
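# A minimal sketch of the encode/decode round trip covered above, using the same tiny
# config as the tests; weights are random, so the reconstruction is meaningless but
# shape-correct.
if __name__ == "__main__":
    vq = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = vq.encode(x).latents
        recon = vq.decode(latents).sample
    assert recon.shape == x.shape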
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_distilbert_fast"] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
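# A small sketch of what the _LazyModule indirection buys: `import transformers` stays
# cheap, and the heavy submodule import is deferred until a symbol such as
# DistilBertConfig is actually touched (assumes transformers is installed).
if __name__ == "__main__":
    import transformers

    config = transformers.DistilBertConfig(dim=128, hidden_dim=256, n_layers=2, n_heads=2)
    print(config.model_type)  # "distilbert"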
| 354 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
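# A variant sketch that passes the query via `params` instead of string concatenation,
# letting requests handle URL encoding; the endpoint and parameter names are taken from
# _NEWS_API above.
def fetch_bbc_news_via_params(bbc_news_api_key: str) -> None:
    bbc_news_page = requests.get(
        "https://newsapi.org/v1/articles",
        params={"source": "bbc-news", "sortBy": "top", "apiKey": bbc_news_api_key},
    ).json()
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")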
| 684 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
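# A quick sanity check on the functions above: four 90-degree rotations compose to the
# identity, and rotate_180 agrees with two applications of rotate_90.
if __name__ == "__main__":
    m = make_matrix(3)
    assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m
    assert rotate_180(m) == rotate_90(rotate_90(m))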
| 92 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def setUp(self):
        self.block_size = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
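# The helper under test is not defined in this file; inferred from the expected outputs
# above, a reference implementation of truncate_or_pad would look like this sketch
# (an assumption based on the test cases, not the actual source).
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]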
| 684 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"
@property
def UpperCamelCase ( self , UpperCamelCase_=(32, 32) ):
lowercase_ :Union[str, Any] = 4
lowercase_ :Optional[int] = 3
lowercase_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
def UpperCamelCase ( self ):
lowercase_ :Dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowercase_ :Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
lowercase_ , lowercase_ :Dict = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__UpperCamelCase )
lowercase_ :str = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase ( self ):
lowercase_ :Tuple = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowercase_ :Optional[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase_ :List[str] = image.to(__UpperCamelCase )
with torch.no_grad():
lowercase_ :Optional[Any] = model(__UpperCamelCase ).sample
lowercase_ :Dict = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase_ :Optional[int] = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
| 257 |
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
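# Worked examples for the function above (P = V * I; exactly one argument is zero and
# the function solves for it):
#   electric_power(voltage=0, current=2, power=5)   -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)   -> result(name='power', value=4.0)
#   electric_power(voltage=-2, current=3, power=0)  -> result(name='power', value=6.0)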
| 684 | 0 |
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 217 |
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end times and the duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
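# A self-contained illustration of the duration arithmetic in extract_time_from_single_job:
# ISO-8601 timestamps parsed with dateutil, and the difference rounded to whole minutes
# (7.5 rounds to 8 under Python's round-half-to-even).
def _duration_example() -> int:
    start_dt = date_parser.parse("2023-01-01T10:00:00Z")
    end_dt = date_parser.parse("2023-01-01T10:07:30Z")
    return round((end_dt - start_dt).total_seconds() / 60.0)  # -> 8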
| 684 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
a__ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 477 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'''Attn error with {name}''')
def rename(input_string, max_depth=13):
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def rename_orig_weights(state_dict):
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def main(args):
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
main(args)
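# Typical invocation, assuming this script is saved as convert_dance_diffusion_to_diffusers.py
# (the output directory is a placeholder); --model_path may be one of the official names in
# MODELS_MAP above or a path to a local .ckpt file:
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers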
| 684 | 0 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
def __lt__( self: str , UpperCamelCase_: Union[str, Any] ):
return self[-1] < other[-1]
def __eq__( self: Union[str, Any] , UpperCamelCase_: Optional[int] ):
return self[-1] == other[-1]
def _UpperCamelCase ( A ):
UpperCamelCase_ =[]
# sort into stacks
for element in collection:
UpperCamelCase_ =Stack([element] )
UpperCamelCase_ =bisect_left(_lowerCAmelCase , _lowerCAmelCase )
if i != len(_lowerCAmelCase ):
stacks[i].append(_lowerCAmelCase )
else:
stacks.append(_lowerCAmelCase )
# use a heap-based merge to merge stack efficiently
UpperCamelCase_ =merge(*(reversed(_lowerCAmelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
A_ = input("Enter numbers separated by a comma:\n").strip()
A_ = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
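# A deterministic check to complement the interactive demo above (runs after it when the
# module is executed directly).
if __name__ == "__main__":
    assert patience_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]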
| 391 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
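# Added note: the @is_staging_test suite above round-trips processors through the Hub
# staging endpoint -- push_to_hub, or save_pretrained(..., push_to_hub=True), followed
# by from_pretrained -- and asserts that every attribute in __dict__ survives the trip.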
| 684 | 0 |
a_ : Union[str, Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( __UpperCAmelCase ):
a__ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a_ : Optional[int] = [None] * 10_000_000
a_ : List[Any] = True
a_ : Tuple = False
def __a ( __UpperCAmelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
a__ = chain(next_number(_lowerCAmelCase ) )
a__ = number_chain
while number < 10_000_000:
a__ = number_chain
number *= 10
return number_chain
def __a ( __UpperCAmelCase = 10_000_000 ):
for i in range(1 , _lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowerCAmelCase )
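# Added worked example: 44 -> 32 -> 13 -> 10 -> 1, while 85 -> 89 -> 145 -> 42 -> 20 ->
# 4 -> 16 -> 37 -> 58 -> 89 loops, so every chain settles on 1 or 89; solution() counts
# how many starting numbers below ten million reach 89 (the published answer is 8581146).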
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 194 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
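# Added note: the test above is differential testing -- every operation sequence is
# replayed against both HashMap and a plain dict, and the results plus the final
# container state (string form, key set, length, items) must agree.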
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 0 |
from __future__ import annotations
from statistics import mean
def lowerCamelCase__ ( a : Union[str, Any] , a : Optional[int] , a : Dict ) -> list[int]:
"""simple docstring"""
a__ :Any = [0] * no_of_processes
a__ :Union[str, Any] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
a__ :Optional[Any] = burst_time[i]
a__ :Tuple = []
a__ :Union[str, Any] = 0
a__ :List[str] = 0
# When processes are not completed,
# A process whose arrival time has passed
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
a__ :int = []
a__ :List[str] = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
a__ :List[str] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
a__ :str = i
total_time += burst_time[target_process]
completed += 1
a__ :Dict = 0
a__ :Tuple = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCamelCase__ ( a : Optional[Any] , a : int , a : List[Any] ) -> list[int]:
"""simple docstring"""
a__ :Dict = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
a__ :List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
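# Added cross-check (not part of the original tests): with bursts [2, 5, 3, 7] and all
# arrivals at 0, the scheduler runs P1(0-2), P3(2-5), P2(5-10), P4(10-17), giving
# waiting times [0, 5, 2, 10] and turnaround times [2, 10, 5, 17] -- i.e. mean waiting
# 4.25 and mean turnaround 8.50, matching the printout below.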
if __name__ == "__main__":
print('''[TEST CASE 01]''')
snake_case__ = 4
snake_case__ = [2, 5, 3, 7]
snake_case__ = [0, 0, 0, 0]
snake_case__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
snake_case__ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 395 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
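# Added example: the binary search cuts the comparisons per element to O(log i), though
# the shifting loop keeps the sort O(n^2) overall;
# e.g. binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6].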
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 684 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : List[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Union[str, Any] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
__magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
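# Added note: _LazyModule defers importing the heavy torch-backed classes until one of
# them is first accessed, so importing the package stays cheap when NLLB-MoE is unused.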
| 497 |
__lowerCAmelCase = 256
# Modulus to hash a string
__lowerCAmelCase = 1_000_003
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
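# Added illustration -- a standalone check (hypothetical helper, not part of the
# original tests) that the O(1) rolling update above matches a full re-hash of the
# next window.
def _rolling_hash_demo() -> None:
    base, mod = 256, 1_000_003  # mirror the module constants above
    text, p_len = "xab", 2
    window_hash = 0
    for ch in text[:p_len]:  # hash of "xa"
        window_hash = (ord(ch) + window_hash * base) % mod
    power = pow(base, p_len - 1, mod)  # weight of the leading character
    rolled = ((window_hash - ord(text[0]) * power) * base + ord(text[p_len])) % mod
    rehash = 0
    for ch in text[1 : p_len + 1]:  # hash of "ab" computed from scratch
        rehash = (ord(ch) + rehash * base) % mod
    assert rolled == rehash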
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __lowercase ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = list(s_dict.keys() )
for key in keys:
__lowercase = R".*/layers_(\d+)"
__lowercase = key
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = re.sub(R"layers_(\d+)" , R"block/\1/layer" , _lowerCAmelCase )
__lowercase = R"(encoder|decoder)\/"
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = re.match(_lowerCAmelCase , _lowerCAmelCase ).groups()
if groups[0] == "encoder":
__lowercase = re.sub(R"/mlp/" , R"/1/mlp/" , _lowerCAmelCase )
__lowercase = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , _lowerCAmelCase )
elif groups[0] == "decoder":
__lowercase = re.sub(R"/mlp/" , R"/2/mlp/" , _lowerCAmelCase )
__lowercase = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , _lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__lowercase = new_key.replace(_lowerCAmelCase , _lowerCAmelCase )
print(f'''{key} -> {new_key}''' )
__lowercase = s_dict.pop(_lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__lowercase = s_dict[key].shape[0]
__lowercase = s_dict[key]
for idx in range(_lowerCAmelCase ):
s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weihts[idx]
print(f'''{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}''' )
s_dict.pop(_lowerCAmelCase )
return s_dict
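# Added example (derived from the mappings above): a T5X name such as
# "encoder/layers_0/attention/query/kernel" is rewritten step by step into
# "encoder/block/0/layer/0/SelfAttention/q/kernel".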
lowerCAmelCase__ = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
import regex as re
with open(_lowerCAmelCase , "r" ) as f:
__lowercase = f.read()
__lowercase = re.findall(R"(.*) = ([0-9.]*)" , _lowerCAmelCase )
__lowercase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__lowercase = float(_lowerCAmelCase ) if "." in value else int(_lowerCAmelCase )
__lowercase = re.findall(R"(.*activations) = \(\'(.*)\',\)" , _lowerCAmelCase )[0]
__lowercase = str(activation[1] )
__lowercase = num_experts
__lowercase = SwitchTransformersConfig(**_lowerCAmelCase )
return config
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase="./" , _UpperCAmelCase=8 ) -> Optional[Any]:
'''simple docstring'''
print(f'''Loading flax weights from : {flax_checkpoint_path}''' )
__lowercase = checkpoints.load_t5x_checkpoint(_lowerCAmelCase )
if gin_file is not None:
__lowercase = convert_gin_to_config(_lowerCAmelCase , _lowerCAmelCase )
else:
__lowercase = SwitchTransformersConfig.from_pretrained(_lowerCAmelCase )
__lowercase = SwitchTransformersForConditionalGeneration(_lowerCAmelCase )
__lowercase = flax_params["target"]
__lowercase = flatten_dict(_lowerCAmelCase , sep="/" )
__lowercase = rename_keys(_lowerCAmelCase )
__lowercase = unflatten_dict(_lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase )
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
lowerCAmelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 321 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.float32 )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
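# Added note: the test above deliberately rescales the audio to [0, 65535], then checks
# that zero_mean_unit_var_norm pulls the mean back below 1e-3 and the variance to
# 1 +/- 1e-3.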
| 684 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a : Dict = logging.get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> Tuple[int, int]:
def constraint_to_multiple_of(_UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Union[str, Any]=None ):
__snake_case = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__snake_case = math.floor(val / multiple ) * multiple
if x < min_val:
__snake_case = math.ceil(val / multiple ) * multiple
return x
__snake_case = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
__snake_case , __snake_case = get_image_size(_lowerCAmelCase )
__snake_case , __snake_case = output_size
# determine new height and width
__snake_case = output_height / input_height
__snake_case = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__snake_case = scale_width
else:
# fit height
__snake_case = scale_height
__snake_case = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
__snake_case = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
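# Added worked example: resizing a 480x640 input toward 384x384 with
# keep_aspect_ratio=True keeps the scale closest to 1 (384/480 = 0.8 beats
# 384/640 = 0.6) for both sides, then snaps each to the multiple; with multiple=32
# the result is (384, 512).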
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Any , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = False , a_ : int = 1 , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , **a_ : Tuple , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
__snake_case = size if size is not None else {"height": 384, "width": 384}
__snake_case = get_size_dict(__UpperCamelCase )
__snake_case = do_resize
__snake_case = size
__snake_case = keep_aspect_ratio
__snake_case = ensure_multiple_of
__snake_case = resample
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : List[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : bool = False , a_ : int = 1 , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
__snake_case = get_resize_output_image_size(
__UpperCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__UpperCamelCase , multiple=__UpperCamelCase , )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A ( self : Optional[int] , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[str] , ):
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A ( self : Tuple , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Union[str, Any] , ):
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A ( self : Dict , a_ : ImageInput , a_ : bool = None , a_ : int = None , a_ : bool = None , a_ : int = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : str , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(__UpperCamelCase )
__snake_case = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__snake_case = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__snake_case = resample if resample is not None else self.resample
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
__snake_case = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
__snake_case = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
__snake_case = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
def A ( self : Any , a_ : List[str] , a_ : List[Tuple] = None ):
"""simple docstring"""
__snake_case = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__UpperCamelCase ):
__snake_case = target_sizes.numpy()
__snake_case = []
for idx in range(len(__UpperCamelCase ) ):
__snake_case = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__UpperCamelCase )
__snake_case = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCamelCase )
else:
__snake_case = logits.argmax(dim=1 )
__snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
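# Added note: when target_sizes are given, each logit map is bilinearly upsampled to
# its original image size before the per-pixel argmax, so the returned masks align
# with the inputs.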
| 69 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
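# Added usage sketch (illustrative calls; the function above is download_prompt in
# transformers): any string containing whitespace is treated as a literal prompt and
# returned unchanged, anything else is resolved as a dataset repo id.
# download_prompt(None, "my-agent")                       -> default "run" template
# download_prompt("Do <<task>> step by step", "my-agent") -> returned as-is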
| 684 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = """naver-clova-ix/donut-base-finetuned-docvqa"""
SCREAMING_SNAKE_CASE = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
SCREAMING_SNAKE_CASE = """document_qa"""
SCREAMING_SNAKE_CASE = AutoProcessor
SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel
SCREAMING_SNAKE_CASE = ["""image""", """text"""]
SCREAMING_SNAKE_CASE = ["""text"""]
def __init__( self : Any , *snake_case_ : str , **snake_case_ : str)-> List[str]:
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""")
super().__init__(*__UpperCamelCase , **__UpperCamelCase)
def UpperCamelCase ( self : str , snake_case_ : "Image" , snake_case_ : str)-> str:
__lowerCAmelCase ="""<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
__lowerCAmelCase =task_prompt.replace("""{user_input}""" , __UpperCamelCase)
__lowerCAmelCase =self.pre_processor.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors="""pt""").input_ids
__lowerCAmelCase =self.pre_processor(__UpperCamelCase , return_tensors="""pt""").pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase ( self : int , snake_case_ : str)-> str:
return self.model.generate(
inputs["""pixel_values"""].to(self.device) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCamelCase , ).sequences
def UpperCamelCase ( self : Any , snake_case_ : List[Any])-> str:
__lowerCAmelCase =self.pre_processor.batch_decode(__UpperCamelCase)[0]
__lowerCAmelCase =sequence.replace(self.pre_processor.tokenizer.eos_token , """""")
__lowerCAmelCase =sequence.replace(self.pre_processor.tokenizer.pad_token , """""")
__lowerCAmelCase =re.sub(R"""<.*?>""" , """""" , __UpperCamelCase , count=1).strip() # remove first task start token
__lowerCAmelCase =self.pre_processor.token2json(__UpperCamelCase)
return sequence["answer"]
| 354 |
from itertools import permutations
def __lowerCamelCase ( _lowerCAmelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int:
return sum(
int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
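# Added worked example: 1406357289 is substring-divisible -- 406 % 2, 63 % 3, 635 % 5,
# 357 % 7, 572 % 11, 728 % 13 and 289 % 17 are all zero -- and solution() sums every
# 0-9 pandigital with this property (the published answer is 16695334890).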
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("""fixtures""")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase : Tuple =mock.Mock()
lowercase : Optional[Any] =500
lowercase : Dict ={}
lowercase : int =HTTPError
lowercase : Any ={}
# Download this model to make sure it's in the cache.
lowercase : str =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__UpperCamelCase ) as mock_head:
lowercase : List[str] =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase : Optional[int] =ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase : Tuple =AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
lowercase : List[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def lowerCamelCase_ ( cls : str ):
'''simple docstring'''
lowercase : List[Any] =TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
lowercase : Optional[int] =ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id='''test-image-processor''' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
lowercase : Any =ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
lowercase : str =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
lowercase : List[str] =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
lowercase : Dict =CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
lowercase : List[str] =AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 92 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
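# Added note: _build_conversation_input_ids (the unobfuscated name of the method above)
# trims from the left when the encoded conversation exceeds model_max_length, so the
# most recent turns are the ones that survive.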
| 684 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : List[Any] ="""table-transformer"""
lowercase : List[Any] =["""past_key_values"""]
lowercase : Union[str, Any] ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=3 , UpperCamelCase_=100 , UpperCamelCase_=6 , UpperCamelCase_=2048 , UpperCamelCase_=8 , UpperCamelCase_=6 , UpperCamelCase_=2048 , UpperCamelCase_=8 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=256 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1.0 , UpperCamelCase_=False , UpperCamelCase_="sine" , UpperCamelCase_="resnet50" , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=1 , UpperCamelCase_=5 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=1 , UpperCamelCase_=5 , UpperCamelCase_=2 , UpperCamelCase_=0.1 , **UpperCamelCase_ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ :List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase_ :List[str] = backbone_config.get('''model_type''' )
lowercase_ :str = CONFIG_MAPPING[backbone_model_type]
lowercase_ :str = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
lowercase_ , lowercase_ , lowercase_ :List[Any] = None, None, None
lowercase_ :Dict = use_timm_backbone
lowercase_ :Union[str, Any] = backbone_config
lowercase_ :Any = num_channels
lowercase_ :Tuple = num_queries
lowercase_ :Dict = d_model
lowercase_ :Optional[int] = encoder_ffn_dim
lowercase_ :Dict = encoder_layers
lowercase_ :Union[str, Any] = encoder_attention_heads
lowercase_ :str = decoder_ffn_dim
lowercase_ :List[Any] = decoder_layers
lowercase_ :Tuple = decoder_attention_heads
lowercase_ :Any = dropout
lowercase_ :Optional[int] = attention_dropout
lowercase_ :List[str] = activation_dropout
lowercase_ :Dict = activation_function
lowercase_ :List[Any] = init_std
lowercase_ :Union[str, Any] = init_xavier_std
lowercase_ :List[Any] = encoder_layerdrop
lowercase_ :Union[str, Any] = decoder_layerdrop
lowercase_ :List[str] = encoder_layers
lowercase_ :List[str] = auxiliary_loss
lowercase_ :Tuple = position_embedding_type
lowercase_ :Optional[Any] = backbone
lowercase_ :Any = use_pretrained_backbone
lowercase_ :Union[str, Any] = dilation
# Hungarian matcher
lowercase_ :Tuple = class_cost
lowercase_ :str = bbox_cost
lowercase_ :List[str] = giou_cost
# Loss coefficients
lowercase_ :str = mask_loss_coefficient
lowercase_ :Optional[int] = dice_loss_coefficient
lowercase_ :Dict = bbox_loss_coefficient
lowercase_ :Tuple = giou_loss_coefficient
lowercase_ :Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def UpperCamelCase ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase ( self ):
return self.d_model
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : List[str] =version.parse("""1.11""" )
@property
def UpperCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase ( self ):
return 1E-5
@property
def UpperCamelCase ( self ):
return 12
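# Hypothetical sketch of how an `attribute_map` like the one above lets aliased
# config attributes resolve transparently (e.g. `hidden_size` -> `d_model`);
# the class here is illustrative, not the real PretrainedConfig machinery.
class _ConfigSketch:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

assert _ConfigSketch().hidden_size == 256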
| 257 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
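# Minimal sketch (assumed tensor names) of the weight-copy pattern used above:
# tensors from the downstream checkpoint dict are assigned onto the matching
# parameters of a freshly initialised torch module.
import torch

_linear = torch.nn.Linear(4, 2)
_downstream = {"model.linear.weight": torch.zeros(2, 4), "model.linear.bias": torch.zeros(2)}
with torch.no_grad():
    _linear.weight.copy_(_downstream["model.linear.weight"])
    _linear.bias.copy_(_downstream["model.linear.bias"])
assert _linear.weight.abs().sum().item() == 0.0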
| 684 | 0 |
from ..utils import DummyObject, requires_backends
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Dict = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *_A : Union[str, Any] , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : Optional[int] , *_A : Any , **_A : List[str] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : Dict , *_A : List[str] , **_A : Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *_A : Optional[Any] , **_A : Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : Dict , *_A : Tuple , **_A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : List[str] , *_A : str , **_A : str ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Tuple = ["""flax""", """transformers"""]
def __init__( self : Dict , *_A : Tuple , **_A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : Any , *_A : str , **_A : str ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : Union[str, Any] , *_A : List[Any] , **_A : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *_A : Optional[Any] , **_A : List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : str , *_A : Optional[Any] , **_A : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __a ( cls : int , *_A : Union[str, Any] , **_A : Optional[int] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
 | 217 |
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = set({"(", "[", "{"} )
_UpperCAmelCase = set({")", "]", "}"} )
_UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowerCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowerCAmelCase ) == 0
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
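# Equivalent readable sketch of the bracket-matching loop above (helper name is
# illustrative): push opening brackets, and on a closing bracket pop and check
# that the pair matches. The string is balanced iff the stack ends empty.
def _is_balanced(s):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs and (not stack or stack.pop() != pairs[ch]):
            return False
    return not stack

assert _is_balanced("{[()]}") and not _is_balanced("{[(])}")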
| 684 | 0 |
from __future__ import annotations
a__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
a__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> list[float]:
_snake_case : Dict = []
_snake_case : List[str] = len(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
_snake_case : Optional[Any] = -1
for j in range(i + 1 , _lowerCAmelCase ):
if arr[i] < arr[j]:
_snake_case : Any = arr[j]
break
result.append(_lowerCAmelCase )
return result
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> list[float]:
_snake_case : Dict = []
for i, outer in enumerate(_lowerCAmelCase ):
_snake_case : Any = -1
for inner in arr[i + 1 :]:
if outer < inner:
_snake_case : Dict = inner
break
result.append(_lowerCAmelCase )
return result
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] ) -> list[float]:
_snake_case : List[str] = len(_lowerCAmelCase )
_snake_case : Union[str, Any] = []
_snake_case : Optional[Any] = [-1] * arr_size
for index in reversed(range(_lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_snake_case : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
a__ = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
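# Standalone sketch of the monotonic-stack idea behind the fastest variant
# above: scan right-to-left, pop stack values <= the current element, and the
# remaining stack top (if any) is the next greater element. Each element is
# pushed and popped at most once, so the whole pass is O(n).
def _next_greater(arr):
    result, stack = [-1] * len(arr), []
    for i in range(len(arr) - 1, -1, -1):
        while stack and stack[-1] <= arr[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

assert _next_greater([2, 1, 3]) == [3, 3, -1]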
| 477 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
# Check if the input is valid
if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution x = y = 0 (Consistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
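# Worked example of Cramer's rule for a 2x2 system (illustrative helper, each
# equation given as (a, b, c) for a*x + b*y = c): for x + 2y = 3 and
# 2x + y = 3, D = -3 and Dx = Dy = -3, so x = y = 1.
def _cramer_2x2(eq1, eq2):
    a1, b1, c1 = eq1
    a2, b2, c2 = eq2
    d = a1 * b2 - a2 * b1
    return (c1 * b2 - c2 * b1) / d, (a1 * c2 - a2 * c1) / d

assert _cramer_2x2((1, 2, 3), (2, 1, 3)) == (1.0, 1.0)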
| 684 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
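# Minimal sketch of the optional-dependency guard used above: probe for the
# backend and only extend the import structure when the probe succeeds
# (module and key names here are illustrative).
import importlib.util

_import_structure_sketch = {"processing": ["Processor"]}
if importlib.util.find_spec("tokenizers") is not None:
    _import_structure_sketch["tokenization_fast"] = ["TokenizerFast"]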
| 391 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
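# Illustrative invocation of the conversion script above (script filename and
# all paths are placeholders, not real checkpoints):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin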
| 684 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __UpperCamelCase ( _lowercase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ = SMALL_MODEL_IDENTIFIER
a__ = '''pt'''
a__ = '''tf'''
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCamelCase )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
a__ = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCamelCase )
model_tf.save_pretrained(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
a__ = '''mock_framework'''
# Framework provided - return whatever the user provides
a__ = FeaturesManager.determine_framework(self.test_model , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
a__ = FeaturesManager.determine_framework(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
a__ = FeaturesManager.determine_framework(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
a__ = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
a__ = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCamelCase ):
a__ = FeaturesManager.determine_framework(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
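# TensorFlow not in environment -> use PyTorch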
a__ = MagicMock(return_value=__UpperCamelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCamelCase ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
a__ = MagicMock(return_value=__UpperCamelCase )
with patch('''transformers.onnx.features.is_torch_available''' , __UpperCamelCase ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_tf )
# Both in environment -> use PyTorch
a__ = MagicMock(return_value=__UpperCamelCase )
a__ = MagicMock(return_value=__UpperCamelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCamelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCamelCase ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase , self.framework_pt )
# Both not in environment -> raise error
a__ = MagicMock(return_value=__UpperCamelCase )
a__ = MagicMock(return_value=__UpperCamelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCamelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCamelCase ):
with self.assertRaises(__UpperCamelCase ):
a__ = FeaturesManager.determine_framework(self.test_model )
| 194 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 0 |
import inspect
import unittest
class lowerCAmelCase_ ( unittest.TestCase):
def _snake_case ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _snake_case ( self : Any ) ->List[str]:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
a__ :Union[str, Any] = inspect.getmembers(__UpperCamelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
a__ :Dict = "k-diffusion"
elif backend == "invisible_watermark":
a__ :str = "invisible-watermark"
assert backend in deps, F'''{backend} is not in the deps table!'''
| 395 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : List[str] = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Union[str, Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__magic_name__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 497 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with the same config gives the same results for the defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
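# Condensed sketch of the denoising loop that `full_loop` above exercises
# (stand-in names; `model` predicts the residual for the current timestep):
#
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample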
| 684 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCAmelCase__ = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCAmelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __lowercase ( _UpperCAmelCase ) -> str:
'''simple docstring'''
if "://" in dataset_path:
__lowercase = dataset_path.split("://" )[1]
return dataset_path
def __lowercase ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = not is_remote_filesystem(_lowerCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCAmelCase ) , fs._strip_protocol(_lowerCAmelCase ) )
else:
fs.mv(_lowerCAmelCase , _lowerCAmelCase , recursive=_lowerCAmelCase )
def __lowercase ( ) -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__lowercase = None
__lowercase = None
__lowercase = threading.Lock()
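# Quick check of the URI handling above (behaviour as written): everything
# before "://" is dropped, so remote dataset paths reduce to bucket-relative
# paths.
assert "s3://bucket/data".split("://")[1] == "bucket/data"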
| 321 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = w
def UpperCAmelCase__ ( self : Dict ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
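# Readable standalone sketch of the Floyd-Warshall relaxation above: for each
# intermediate vertex k, try to shorten every pair (i, j) through k. Runs in
# O(n^3) time with O(n^2) extra space.
import math

def _floyd_warshall(weights):
    n = len(weights)
    dist = [row[:] for row in weights]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

_INF = math.inf
assert _floyd_warshall([[0, 5, _INF], [_INF, 0, 2], [1, _INF, 0]])[0][2] == 7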
| 684 | 0 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a : Optional[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any]=False ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
__snake_case = os.path.abspath(_lowerCAmelCase )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
__snake_case = torch.load(_lowerCAmelCase , map_location="cpu" )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
__snake_case = convert_pytorch_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__snake_case = convert_pytorch_sharded_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase )
return flax_state_dict
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(_UpperCAmelCase : Optional[int] ) -> bool:
return len(set(_lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__snake_case = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__snake_case = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__snake_case = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__snake_case = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__snake_case = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
__snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__snake_case = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
__snake_case = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__snake_case = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__snake_case = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__snake_case = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__snake_case = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__snake_case = pt_tuple_key[-2] + "_v"
if name is not None:
__snake_case = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Tuple:
# convert pytorch tensor to numpy
__snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
__snake_case = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__snake_case = flax_model.params["params"]
else:
__snake_case = flax_model.params
__snake_case = flatten_dict(_lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__snake_case = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_lowerCAmelCase )
__snake_case = {}
__snake_case = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
__snake_case = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__snake_case = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
__snake_case = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = pt_tuple_key[1:]
# Correctly rename weight parameters
__snake_case , __snake_case = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
__snake_case = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__snake_case = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> List[str]:
import torch
# Load the index
__snake_case = {}
for shard_file in shard_filenames:
# load using msgpack utils
__snake_case = torch.load(_lowerCAmelCase )
__snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
__snake_case = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__snake_case = flax_model.params["params"]
__snake_case = flatten_dict(_lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
__snake_case = flax_model.params
__snake_case = flatten_dict(_lowerCAmelCase )
__snake_case = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
__snake_case = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__snake_case = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
__snake_case = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = pt_tuple_key[1:]
# Correctly rename weight parameters
__snake_case , __snake_case = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
__snake_case = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__snake_case = jnp.asarray(_lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__snake_case = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> List[str]:
__snake_case = os.path.abspath(_lowerCAmelCase )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
__snake_case = getattr(_lowerCAmelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCAmelCase , "rb" ) as state_f:
try:
__snake_case = from_bytes(_lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
__snake_case = flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa , _lowerCAmelCase ) ).values()
if any(_lowerCAmelCase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
# handle bf16 and bf16 is not fully supported in PyTorch yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
__snake_case = jax.tree_util.tree_map(
lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCAmelCase )
__snake_case = flatten_dict(_lowerCAmelCase )
__snake_case = pt_model.state_dict()
__snake_case = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
__snake_case = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__snake_case = []
__snake_case = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__snake_case = flax_key_tuple[0] == pt_model.base_model_prefix
__snake_case = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# conv layer
__snake_case = flax_key_tuple[:-1] + ("weight",)
__snake_case = jnp.transpose(_lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# linear layer
__snake_case = flax_key_tuple[:-1] + ("weight",)
__snake_case = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__snake_case = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
__snake_case = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__snake_case = ".".join(_lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__snake_case = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__snake_case = key.split("." )
__snake_case = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__snake_case = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
__snake_case = key_components[-2] + "_v"
if name is not None:
__snake_case = key_components[:-3] + [name]
__snake_case = ".".join(_lowerCAmelCase )
__snake_case = key
if flax_key in special_pt_names:
__snake_case = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
__snake_case = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase , np.ndarray ) else flax_tensor
__snake_case = torch.from_numpy(_lowerCAmelCase )
# remove from missing keys
missing_keys.remove(_lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCAmelCase )
pt_model.load_state_dict(_lowerCAmelCase )
# re-transform missing_keys to list
__snake_case = list(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(_lowerCAmelCase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
" use it for predictions and inference." )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"If your task is similar to the task the model of the checkpoint was trained on, "
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
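# Tiny numeric check of the layout conventions handled above: a PyTorch Linear
# weight is (out_features, in_features) while a Flax Dense kernel is
# (in_features, out_features), and a PyTorch Conv2d kernel (out, in, kh, kw)
# maps to Flax's (kh, kw, in, out) via the transpose(2, 3, 1, 0) used above.
import numpy as np

_pt_linear = np.zeros((8, 4))
assert _pt_linear.T.shape == (4, 8)
_pt_conv = np.zeros((16, 3, 5, 5))
assert _pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 16)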
| 69 |
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
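
# --- Illustrative usage sketch (added; not part of the original test file) ---
# Builds a tiny VQModel with the same init kwargs used in
# `prepare_init_args_and_inputs_for_common` above and runs one forward pass.
# Assumes `diffusers` and `torch` are installed; nothing here is canonical.
if __name__ == "__main__":
    tiny_vq = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    reconstruction = tiny_vq(torch.randn(1, 3, 32, 32)).sample
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])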
# Size of the character alphabet
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Recompute the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
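
# --- Added note and check (illustrative) ---
# The rolling hash makes the expected running time O(p_len + t_len) instead of
# the naive O(p_len * t_len): sliding the window drops the leading character's
# contribution and appends the new one in O(1). A quick cross-check against
# Python's built-in substring search:
if __name__ == "__main__":
    import random
    import string

    rng = random.Random(0)
    for _ in range(100):
        text = "".join(rng.choices(string.ascii_lowercase, k=50))
        pattern = "".join(rng.choices(string.ascii_lowercase, k=3))
        assert rabin_karp(pattern, text) == (pattern in text)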
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
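
# Added note on the pattern above: `_LazyModule` defers the actual submodule
# imports, so `from transformers.models.vit_mae import ViTMAEModel` only pays
# the torch/TF import cost the first time the attribute is resolved. The
# `TYPE_CHECKING` branch imports everything eagerly for static type checkers
# and IDEs, which never execute at runtime.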
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
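
# Added note: the "fit_to_block" tests above pin down `truncate_or_pad` exactly:
#   truncate_or_pad([1, 2, 3], 5, 0)        -> [1, 2, 3, 0, 0]   (right-pad)
#   truncate_or_pad(list(range(8)), 5, 0)   -> [0, 1, 2, 3, 4]   (truncate)
# i.e. the sequence is either clipped to `block_size` or padded with the given
# pad value until it reaches that size.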
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power, compute the third (pass 0
    for the unknown quantity)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
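
# --- Illustrative usage (added) ---
if __name__ == "__main__":
    # P = V * I, so with V = 2 V and I = 3 A the missing power is 6 W; passing
    # 0 for the unknown quantity selects which one gets computed.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=3, power=0))  # result(name='power', value=6.0)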
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide out this prime factor completely
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
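
# --- Worked example (added, illustrative) ---
if __name__ == "__main__":
    # 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13195) == 29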
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end times and the duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
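
# Added note: for a job payload such as
#   {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:12:30Z", ...}
# this returns {"started_at": ..., "completed_at": ..., "duration": 12}, with
# the duration in whole minutes (12.5 rounds to 12 under Python's banker's
# rounding).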
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs of a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
def ugly_numbers(n: int) -> int:
    """Return the nth "ugly number": a positive integer whose only prime
    factors are 2, 3 and 5 (1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
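
# --- Added check (illustrative) ---
if __name__ == "__main__":
    # Brute-force the first ten ugly numbers to cross-check the DP above.
    def is_ugly(m: int) -> bool:
        for p in (2, 3, 5):
            while m % p == 0:
                m //= p
        return m == 1

    brute = [m for m in range(1, 20) if is_ugly(m)][:10]
    assert [ugly_numbers(k) for k in range(1, 11)] == brute  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]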
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean signal and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
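
# Added note (interpretation of the code above, not from the original script):
# get_crash_schedule maps a linearly spaced t in [0, 1) onto the "crash" noise
# schedule. With sigma = sin(t*pi/2)**2 and alpha = sqrt(1 - sigma**2),
# alpha_sigma_to_t returns atan2(sigma, alpha) * 2 / pi, i.e. the angle (in
# units of pi/2) whose sine/cosine give the noise/signal scales.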
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
A_ = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _UpperCamelCase ( A , A , A=None ):
if rng is None:
UpperCamelCase_ =random.Random()
UpperCamelCase_ =1
for dim in shape:
total_dims *= dim
UpperCamelCase_ =[]
for _ in range(_lowerCAmelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCamelCase_ =np.array(_lowerCAmelCase , dtype=jnp.intaa ).reshape(_lowerCAmelCase )
return output
def _UpperCamelCase ( A , A=None ):
UpperCamelCase_ =ids_tensor(_lowerCAmelCase , vocab_size=2 , rng=_lowerCAmelCase )
# make sure that at least one token is attended to for each batch
UpperCamelCase_ =1
return attn_mask
@require_flax
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : List[str] = None
__lowerCamelCase : Union[str, Any] = ()
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCamelCase_ =2
UpperCamelCase_ =inputs["input_ids"].shape[-1] // 2
UpperCamelCase_ =inputs["input_ids"][:max_batch_size, :sequence_length]
UpperCamelCase_ =jnp.ones_like(__UpperCamelCase )
UpperCamelCase_ =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCamelCase_ =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCamelCase_ =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =False
UpperCamelCase_ =max_length
UpperCamelCase_ =0
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase_ =getattr(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ =pt_model_class(__UpperCamelCase ).eval()
UpperCamelCase_ =load_flax_weights_in_pytorch_model(__UpperCamelCase , flax_model.params )
UpperCamelCase_ =flax_model.generate(__UpperCamelCase ).sequences
UpperCamelCase_ =pt_model.generate(torch.tensor(__UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase_ =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =False
UpperCamelCase_ =max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =True
UpperCamelCase_ =max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =False
UpperCamelCase_ =max_length
UpperCamelCase_ =2
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =False
UpperCamelCase_ =max_length
UpperCamelCase_ =2
UpperCamelCase_ =2
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =True
UpperCamelCase_ =max_length
UpperCamelCase_ =0.8
UpperCamelCase_ =10
UpperCamelCase_ =0.3
UpperCamelCase_ =1
UpperCamelCase_ =8
UpperCamelCase_ =9
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =max_length
UpperCamelCase_ =1
UpperCamelCase_ =8
UpperCamelCase_ =9
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
UpperCamelCase_ =max_length
UpperCamelCase_ =2
UpperCamelCase_ =1
UpperCamelCase_ =8
UpperCamelCase_ =9
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase_ =attention_mask.at[(0, 0)].set(0 )
UpperCamelCase_ =False
UpperCamelCase_ =max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase_ =attention_mask.at[(0, 0)].set(0 )
UpperCamelCase_ =True
UpperCamelCase_ =max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase_ =attention_mask.at[(0, 0)].set(0 )
UpperCamelCase_ =2
UpperCamelCase_ =max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase_ =model_class(__UpperCamelCase )
UpperCamelCase_ =model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
UpperCamelCase_ =jit(model.generate )
UpperCamelCase_ =jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
UpperCamelCase_ =FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCamelCase_ ="Hello world"
UpperCamelCase_ =tokenizer(__UpperCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCamelCase , "do_samples" ):
model.generate(__UpperCamelCase , do_samples=__UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCamelCase , "foo" ):
UpperCamelCase_ ={"foo": "bar"}
model.generate(__UpperCamelCase , **__UpperCamelCase )
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
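
# --- Illustrative usage (added) ---
if __name__ == "__main__":
    # Assumes the repo-local `data_structures.hashing.hash_map.HashMap` imported
    # above; exercises the same dict-like protocol the parametrized test drives
    # via operator.getitem/setitem/delitem.
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    hm["key_b"] = "val_b"
    del hm["key_a"]
    assert dict(hm.items()) == {"key_b": "val_b"}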
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Walk the binary state-space tree with DFS: at each index, either skip the
    element or include it, printing a subsequence at every leaf."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
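
# --- Added check (illustrative) ---
if __name__ == "__main__":
    # Each element is independently kept or dropped, so an n-element sequence
    # has 2**n subsequences (including the empty one).
    def count_subsequences(sequence: list[Any]) -> int:
        if not sequence:
            return 1
        return 2 * count_subsequences(sequence[1:])

    assert count_subsequences([3, 1, 2, 4]) == 2**4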
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion index of val in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
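
# --- Added check (illustrative, separate from the interactive demo above) ---
if __name__ == "__main__":
    # Non-interactive sanity check: must agree with Python's built-in sort.
    import random

    data = random.sample(range(100), k=10)
    assert binary_insertion_sort(list(data)) == sorted(data)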
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word with its letters sorted, e.g. signature("test") == "estt"."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known word with the same signature as the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
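
# --- Added check (illustrative) ---
if __name__ == "__main__":
    # Two words are anagrams exactly when their letter multisets, and hence
    # their signatures, match.
    assert signature("stop") == signature("post") == "opst"
    assert signature("stop") != signature("stops")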
| 497 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
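# Rolling-hash step used above, with H(i) the hash of text[i : i + p_len]:
#   H(i + 1) = ((H(i) - ord(text[i]) * alphabet_size**(p_len - 1)) * alphabet_size
#               + ord(text[i + p_len])) % modulus
# modulus_power caches alphabet_size**(p_len - 1) % modulus, making each slide O(1).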
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 684 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model,
        speech_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ):
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # Halve the attention head dim to trade a little speed for less memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        # Setting the slice size to `None` restores full attention computation.
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
__lowercase = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
__lowercase = self.speech_model.generate(__UpperCamelCase , max_length=48_0000 )
__lowercase = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = len(__UpperCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCamelCase )}.''' )
# get prompt text embeddings
__lowercase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase , __lowercase , __lowercase = text_embeddings.shape
__lowercase = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !='''
f''' {type(__UpperCamelCase )}.''' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
__lowercase = negative_prompt
__lowercase = text_input_ids.shape[-1]
__lowercase = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = uncond_embeddings.shape[1]
__lowercase = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
__lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
__lowercase = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__lowercase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__lowercase = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__lowercase = 1 / 0.1_82_15 * latents
__lowercase = self.vae.decode(__UpperCamelCase ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
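# A minimal usage sketch (checkpoint names and wiring below are assumptions; this
# mirrors how a diffusers community speech-to-image pipeline is typically loaded):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#   )
#   image = pipe(raw_audio, sampling_rate=16_000).images[0]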
| 321 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
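# e.g. floats_list((2, 3)) yields a 2x3 nested list of floats drawn from [0, scale).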
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 0 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
def __init__( self : List[str] , *a_ : Any , **a_ : Tuple ):
"""simple docstring"""
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def A ( self : str , a_ : List[str]=None , a_ : List[str]=None , a_ : Dict=None , a_ : List[Any]=None , a_ : Union[str, Any]=None , a_ : Optional[Any]=None , **a_ : Tuple , ):
"""simple docstring"""
__snake_case = {}
if truncation is not None:
__snake_case = truncation
__snake_case = generate_kwargs
__snake_case = {}
if return_tensors is not None and return_type is None:
__snake_case = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__snake_case = return_type
if clean_up_tokenization_spaces is not None:
__snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
__snake_case = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
__snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A ( self : List[Any] , a_ : int , a_ : int , a_ : int ):
"""simple docstring"""
return True
def A ( self : Optional[Any] , *a_ : Optional[Any] , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
__snake_case = ([prefix + arg for arg in args[0]],)
__snake_case = True
elif isinstance(args[0] , __UpperCamelCase ):
__snake_case = (prefix + args[0],)
__snake_case = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
__snake_case = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[Any] , *a_ : int , **a_ : str ):
"""simple docstring"""
__snake_case = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def A ( self : Tuple , a_ : Any , a_ : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **a_ : Optional[int] ):
"""simple docstring"""
__snake_case = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def A ( self : int , a_ : int , **a_ : int ):
"""simple docstring"""
if self.framework == "pt":
__snake_case , __snake_case = model_inputs["input_ids"].shape
elif self.framework == "tf":
__snake_case , __snake_case = tf.shape(model_inputs["input_ids"] ).numpy()
__snake_case = generate_kwargs.get("min_length" , self.model.config.min_length )
__snake_case = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
__snake_case = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
__snake_case = output_ids.shape[0]
if self.framework == "pt":
__snake_case = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__snake_case = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def A ( self : Any , a_ : Union[str, Any] , a_ : List[Any]=ReturnType.TEXT , a_ : Optional[Any]=False ):
"""simple docstring"""
__snake_case = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__snake_case = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
__snake_case = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"
def __call__( self : Optional[int] , *a_ : Union[str, Any] , **a_ : Any ):
"""simple docstring"""
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def A ( self : List[Any] , a_ : int , a_ : int , a_ : int ):
"""simple docstring"""
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"
def A ( self : Dict , a_ : int , a_ : int , a_ : int ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def A ( self : str , *a_ : Any , a_ : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , a_ : int=None , a_ : Union[str, Any]=None ):
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def A ( self : Optional[int] , a_ : Optional[Any]=None , a_ : str=None , **a_ : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
__snake_case = src_lang
if tgt_lang is not None:
__snake_case = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__snake_case = kwargs.get("task" , self.task )
__snake_case = task.split("_" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
__snake_case = items[1]
__snake_case = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , *a_ : Union[str, Any] , **a_ : Optional[Any] ):
"""simple docstring"""
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
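# Usage sketch via the top-level pipeline API (model names are illustrative):
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="t5-small")
#   summarizer("Long article text ...", max_length=60, min_length=10)
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   translator("How old are you?")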
| 69 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches a prompt from a repo, or returns a literal prompt unchanged."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
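# A small usage note: download_prompt(None, agent_name="MyAgent") fetches the
# default "run" prompt template from the huggingface-tools/default-prompts dataset,
# while any string containing whitespace is returned unchanged as a literal prompt.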
| 684 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self : List[Any])-> Optional[int]:
torch.manual_seed(0)
__lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCAmelCase =PNDMScheduler(skip_prk_steps=__UpperCamelCase)
torch.manual_seed(0)
__lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
__lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCAmelCase =CLIPTextModel(__UpperCamelCase)
__lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
__lowerCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase ( self : int , snake_case_ : Union[str, Any] , snake_case_ : List[Any]=0)-> Union[str, Any]:
__lowerCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase)).to(__UpperCamelCase)
__lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1)[0]
__lowerCAmelCase =Image.fromarray(np.uinta(__UpperCamelCase)).convert("""RGB""")
if str(__UpperCamelCase).startswith("""mps"""):
__lowerCAmelCase =torch.manual_seed(__UpperCamelCase)
else:
__lowerCAmelCase =torch.Generator(device=__UpperCamelCase).manual_seed(__UpperCamelCase)
__lowerCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self : Optional[int])-> Dict:
__lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.get_dummy_components()
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase)
__lowerCAmelCase =sd_pipe.to(__UpperCamelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase)
__lowerCAmelCase =self.get_dummy_inputs(__UpperCamelCase)
__lowerCAmelCase =sd_pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase ( self : str)-> List[str]:
__lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.get_dummy_components()
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase)
__lowerCAmelCase =sd_pipe.to(__UpperCamelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase)
__lowerCAmelCase =self.get_dummy_inputs(__UpperCamelCase)
__lowerCAmelCase ="""french fries"""
__lowerCAmelCase =sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase)
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase ( self : List[Any])-> str:
__lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.get_dummy_components()
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase)
__lowerCAmelCase =sd_pipe.to(__UpperCamelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase)
__lowerCAmelCase =self.get_dummy_inputs(__UpperCamelCase)
__lowerCAmelCase =[inputs["""prompt"""]] * 2
__lowerCAmelCase =np.array(inputs["""image"""]).astype(np.floataa) / 2_5_5.0
__lowerCAmelCase =torch.from_numpy(__UpperCamelCase).unsqueeze(0).to(__UpperCamelCase)
__lowerCAmelCase =image / 2 + 0.5
__lowerCAmelCase =image.permute(0 , 3 , 1 , 2)
__lowerCAmelCase =image.repeat(2 , 1 , 1 , 1)
__lowerCAmelCase =sd_pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__lowerCAmelCase =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase ( self : Any)-> List[Any]:
__lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.get_dummy_components()
__lowerCAmelCase =EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""")
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase)
__lowerCAmelCase =sd_pipe.to(__UpperCamelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase)
__lowerCAmelCase =self.get_dummy_inputs(__UpperCamelCase)
__lowerCAmelCase =sd_pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =[round(__UpperCamelCase , 4) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__UpperCamelCase) for x in slice]))
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase ( self : str)-> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def UpperCamelCase ( self : Union[str, Any])-> int:
__lowerCAmelCase =self.get_dummy_components()
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase)
__lowerCAmelCase =VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase)
__lowerCAmelCase =pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
__lowerCAmelCase =pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt"""))[0]
__lowerCAmelCase =components["""vae"""]
__lowerCAmelCase =self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""")
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCAmelCase =vae.encode(inputs[image_param]).latent_dist.mode()
__lowerCAmelCase =pipe(**__UpperCamelCase)[0]
__lowerCAmelCase =np.abs(out - out_latents_inputs).max()
self.assertLess(__UpperCamelCase , 1e-4 , """passing latents as image input generate different result from passing image""")
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : Any)-> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Optional[Any] , snake_case_ : Union[str, Any]=0)-> List[Any]:
__lowerCAmelCase =torch.manual_seed(__UpperCamelCase)
__lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""")
__lowerCAmelCase ={
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self : Dict)-> int:
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase)
pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
__lowerCAmelCase =self.get_inputs()
__lowerCAmelCase =pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase)
__lowerCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
__lowerCAmelCase =self.get_inputs()
__lowerCAmelCase =pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def UpperCamelCase ( self : Optional[int])-> Any:
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase)
__lowerCAmelCase =DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
__lowerCAmelCase =self.get_inputs()
__lowerCAmelCase =pipe(**__UpperCamelCase).images
__lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def UpperCamelCase ( self : List[Any])-> Optional[Any]:
__lowerCAmelCase =0
def callback_fn(snake_case_ : int , snake_case_ : int , snake_case_ : torch.FloatTensor) -> None:
__lowerCAmelCase =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCAmelCase =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCAmelCase =latents[0, -3:, -3:, -1]
__lowerCAmelCase =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
__lowerCAmelCase =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCAmelCase =latents[0, -3:, -3:, -1]
__lowerCAmelCase =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
__lowerCAmelCase =False
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa)
__lowerCAmelCase =pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
__lowerCAmelCase =self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase ( self : Optional[int])-> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa)
__lowerCAmelCase =pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase =self.get_inputs()
__lowerCAmelCase =pipe(**__UpperCamelCase)
__lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCamelCase ( self : Any)-> Union[str, Any]:
__lowerCAmelCase =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCAmelCase =inputs["""image"""].resize((5_04, 5_04))
__lowerCAmelCase ="""timbrooks/instruct-pix2pix"""
__lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
__lowerCAmelCase =pipe(**__UpperCamelCase)
__lowerCAmelCase =output.images[0]
__lowerCAmelCase =image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
__lowerCAmelCase =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
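# Note on the inputs above: InstructPix2Pix exposes two guidance knobs --
# guidance_scale for the text prompt and image_guidance_scale for faithfulness to
# the source image -- so each denoising step runs a three-way classifier-free
# guidance split (text-conditioned, image-conditioned, unconditioned).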
| 354 |
from itertools import permutations
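# Project Euler problem 43: sum every 0-to-9 pandigital number whose three-digit
# substrings d2d3d4 .. d8d9d10 are divisible by 2, 3, 5, 7, 11, 13, 17 in turn.
# The first three checks below are the cheap last-digit / digit-sum shortcuts.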
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 0 |
"""LUKE model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"
    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
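# A quick sketch: LukeConfig() reproduces the studio-ousia/luke-base defaults.
# entity_emb_size (256) is deliberately smaller than hidden_size (768); the model
# projects entity embeddings up to hidden_size internally.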
| 92 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
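# Usage sketch with the real checkpoint: BlenderbotTokenizerFast.from_pretrained(
#     "facebook/blenderbot-3B")("Hello") ends with the </s> eos id, because the
# method above returning token_ids_a + [self.eos_token_id] implements the fast
# tokenizer's build_inputs_with_special_tokens.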
| 684 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; concrete filters override this."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
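# Usage sketch, with any filter object satisfying the protocol above (e.g. an
# IIR filter class from a sibling audio_filters module -- illustrative only):
#   show_frequency_response(my_filter, 48_000)
#   show_phase_response(my_filter, 48_000)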
| 257 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
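
# --- Added example (hypothetical paths, not part of the original script):
# the converter can also be called programmatically, e.g.
#
#   convert_s3prl_checkpoint(
#       "facebook/wav2vec2-base",       # base_model_name
#       "./config.json",                # config_path
#       "./s3prl_checkpoint.ckpt",      # checkpoint_path
#       "./converted_model",            # model_dump_path
#   )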
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
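

# --- Added example (illustrative helper, not part of the original module):
# after floyd_warshall() has run, graph.dp holds the full all-pairs distance
# table; math.inf entries mean "no path exists".
def print_distance_table(graph: Graph) -> None:
    for row in graph.dp:
        print(["inf" if dist == math.inf else dist for dist in row])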
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
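

# --- Added example (quick sanity checks, not part of the original script):
def _demo_is_balanced() -> None:
    assert is_balanced("([]{})")
    assert is_balanced("")
    assert not is_balanced("([)]")
    assert not is_balanced("(((")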
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
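

# --- Added example (hypothetical usage, not part of the original module):
# passing None falls back to the default prompts repo; "MyAgent" is a
# placeholder agent name used only in the download user-agent header.
#
#   template = download_prompt(None, agent_name="MyAgent", mode="run")
#   print(template[:100])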
def cramers_rule_2x2(equation1, equation2) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
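

# --- Added example (worked check, not part of the original module):
# solving x + 2y = 3 and 2x + 5y = 8 gives x = -1, y = 2, since
# determinant = 1*5 - 2*2 = 1, determinant_x = 3*5 - 8*2 = -1,
# determinant_y = 1*8 - 2*3 = 2.
def _demo_cramers_rule() -> None:
    assert cramers_rule_2x2([1, 2, 3], [2, 5, 8]) == (-1.0, 2.0)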
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
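

# --- Added example (illustrative usage, not part of the original module):
# instantiating the config with a non-default width multiplier; the values
# below are arbitrary.
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   print(config.model_type, config.depth_multiplier)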
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
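
# --- Added example (hypothetical invocation, paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin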
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
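

# --- Added check (Project Euler problem 31: the published answer for making
# 200p out of the eight coins above is 73682):
def _check_solution() -> None:
    assert solution(200) == 73682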
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
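

# --- Added example (quick sanity check, not part of the original script):
def _demo_heaps() -> None:
    perms = heaps([1, 2, 3])
    assert len(perms) == 6  # 3! permutations
    assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}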
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
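

# --- Added example (stand-in tokenizer, not part of the original script):
# `_FakeTok` is hypothetical and mimics only the sliver of the tokenizer API
# that pack_examples touches (`tok(text, return_tensors="pt").input_ids`),
# counting whitespace-separated words as tokens.
class _FakeTok:
    def __call__(self, text, return_tensors="pt"):
        import torch

        class _Encoding:
            input_ids = torch.zeros((1, len(text.split())))

        return _Encoding()


def _demo_pack_examples() -> None:
    src = ["a b", "c d", "e f g h"]
    tgt = ["1", "2", "3"]
    packed_src, packed_tgt = pack_examples(_FakeTok(), src, tgt, max_tokens=4)
    assert packed_src == ["a b c d", "e f g h"]
    assert packed_tgt == ["1 2", "3"]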
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case string to camelCase (or PascalCase if indicated).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
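

# --- Added example (expected behaviour, not part of the original module):
def _demo_snake_to_camel() -> None:
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"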
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that can decay the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
lowercase_ :List[str] = True
lowercase_ :Optional[int] = True
lowercase_ :Union[str, Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
lowercase_ :Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
lowercase_ :Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase_ :Dict = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) )
lowercase_ :Dict = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
lowercase_ :List[str] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
lowercase_ :Dict = outputs.hidden_states
lowercase_ :Union[str, Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# Swinv2 has a different seq_length
lowercase_ :Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ :Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ :Any = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :List[str] = reshaped_hidden_states[0].shape
lowercase_ :int = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self ):
lowercase_ , lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ :Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase_ :List[str] = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ :int = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def UpperCamelCase ( self ):
lowercase_ , lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ :Dict = 3
lowercase_ :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ :Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ :Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase_ :Optional[int] = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ :Tuple = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Optional[Any] = SwinvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCamelCase ( self ):
lowercase_ , lowercase_ :int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ :List[Any] = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
lowercase_ :int = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self ):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
lowercase_ :List[Any] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
__UpperCamelCase )
lowercase_ :Any = self.default_image_processor
lowercase_ :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase_ :Any = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ :int = model(**__UpperCamelCase )
# verify the logits
lowercase_ :Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
lowercase_ :List[str] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 257 |
from __future__ import annotations
from collections import namedtuple
def electric_power ( voltage , current , power ) -> tuple:
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
def snake_case( column_title ) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
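# Worked example for the base-26 conversion above:
#   snake_case("AB") == (ord("A") - 64) * 26 ** 1 + (ord("B") - 64) * 26 ** 0
#                    == 26 + 2 == 28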
if __name__ == "__main__":
from doctest import testmod
testmod() | 217 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
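# Illustrative shape of the mapping returned above ("duration" is read back at the
# bottom of this script; the timestamp keys and values are assumed for the example):
#   {"started_at": "...T10:00:00Z", "completed_at": "...T10:12:00Z", "duration": 12}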
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
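# Pagination sketch for the loop above: with total_count == 250 and per_page == 100,
# the first request returns 100 jobs and math.ceil((250 - 100) / 100) == 2 extra
# pages are fetched via the `&page={i + 2}` query parameter.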
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
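# Usage note (a general property of the _LazyModule pattern above, not extra wiring
# in this file): importing the package stays cheap, and the torch-backed classes in
# `_import_structure` are only materialized on first attribute access.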
| 477 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
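# The two helpers above implement the "crash" noise schedule used by k-diffusion
# style samplers: sigma = sin(t * pi / 2) ** 2, alpha = sqrt(1 - sigma ** 2), and
# atan2(sigma, alpha) * 2 / pi maps the (alpha, sigma) pair back to t in [0, 1].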
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
    name = _lowerCAmelCase
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )
    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
    name = _lowerCAmelCase
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
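# Illustrative call for the renamer above: passing "timestep_embed.weight" returns
# "time_proj.weight" via the early exit at the top; nested `net.`/`main.7.` keys are
# rewritten block by block as the tracked depth increases.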
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
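# The qkv branch above slices the fused attention projection into three equal
# chunks of single_shape = trippled_shape // 3 rows, in the query / key / value
# order declared by ATTN_MAP["qkv_proj"].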
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 0 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A_ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A_ = "main"
# Default branch name
A_ = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
A_ = "aaaaaaa"
# This commit does not exist, so we should 404.
A_ = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
A_ = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def _UpperCamelCase ( ):
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def _UpperCamelCase ( ):
print("Bonjour!" )
yield
print("Au revoir!" )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Optional[Any] ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple ):
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: Optional[int] ):
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def UpperCamelCase__ ( self: List[str] ):
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["start_positions", "end_positions"] )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
@require_tf
def UpperCamelCase__ ( self: Union[str, Any] ):
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["start_positions", "end_positions"] )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
@require_flax
def UpperCamelCase__ ( self: Tuple ):
# Flax models don't have labels
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
| 391 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 0 |
import os
def solution ( ):
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
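# Note on the loop bounds above: 17 == 20 - 3 is the last start index from which a
# four-cell product still fits inside the 20x20 grid, and `range(3 , 20 )` mirrors
# that for the anti-diagonal scan.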
if __name__ == "__main__":
print(solution())
| 194 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
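# e.g. _run_operation(my_map, *_set("k", "v")) evaluates setitem(my_map, "k", "v")
# and yields (None, None) on success; on failure the second element carries the
# raised exception so both implementations can be compared error-for-error.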
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def lowerCamelCase__ ( a : List[str] , a : Optional[int]=False , a : List[str]=False ) -> int:
"""simple docstring"""
a__ :Dict = "backbone." if is_semantic else ""
a__ :List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(F'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(F'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(F'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase__ ( a : Tuple , a : List[Any] , a : Union[str, Any]=False , a : int=False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
a__ :Any = "backbone." if is_semantic else ""
# queries, keys and values
a__ :Tuple = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
a__ :Tuple = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
a__ :List[str] = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
a__ :str = in_proj_weight[
: config.hidden_size, :
]
a__ :str = q_bias
a__ :Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ :Tuple = in_proj_weight[
-config.hidden_size :, :
]
a__ :Tuple = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
a__ :Union[str, Any] = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
a__ :List[str] = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
a__ :Dict = gamma_a
a__ :int = gamma_a
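# BEiT-style checkpoints fuse q/k/v into one projection; the slicing above assigns
# rows [:hidden_size] to query, [hidden_size:2*hidden_size] to key and
# [-hidden_size:] to value. Only q_bias and v_bias are popped -- the key projection
# carries no bias in these checkpoints.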
def lowerCamelCase__ ( a : Union[str, Any] , a : List[str] , a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a__ :str = dct.pop(_lowerCAmelCase )
a__ :List[str] = val
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
a__ :str = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ :Tuple = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( a : Optional[int] , a : List[Any] , a : Dict=False ) -> Optional[Any]:
"""simple docstring"""
a__ :str = False if "rvlcdip" in checkpoint_url else True
a__ :Optional[int] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
a__ :List[Any] = 1_024
a__ :List[Any] = 4_096
a__ :Dict = 24
a__ :Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
a__ :List[str] = 16
a__ :Dict = "huggingface/label-files"
a__ :Any = "rvlcdip-id2label.json"
a__ :List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
a__ :Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
a__ :List[Any] = idalabel
a__ :int = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
a__ :Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"]
a__ :List[str] = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
a__ :Dict = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
a__ :Union[str, Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
a__ :Dict = prepare_img()
a__ :Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
a__ :int = encoding["pixel_values"]
a__ :Dict = model(_lowerCAmelCase )
a__ :int = outputs.logits
# verify logits
a__ :int = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
a__ :Any = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
a__ :Dict = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
snake_case__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 395 |
def binary_insertion_sort ( collection ) -> list:
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
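# e.g. binary_insertion_sort([5, 2, 4, 1]) -> [1, 2, 4, 5]. Binary search locates
# each insertion point in O(log i), but the element-shifting loop keeps the overall
# sort at O(n^2) moves in the worst case.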
| 684 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__magic_name__ : Optional[int] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__magic_name__ : Any = logging.getLogger()
def A__ ( ) -> Optional[Any]:
_lowercase = argparse.ArgumentParser()
parser.add_argument("-f" )
_lowercase = parser.parse_args()
return args.f
def A__ ( A_ , A_="eval" ) -> Optional[Any]:
_lowercase = os.path.join(_lowerCAmelCase , F"""{split}_results.json""" )
if os.path.exists(_lowerCAmelCase ):
with open(_lowerCAmelCase , "r" ) as f:
return json.load(_lowerCAmelCase )
raise ValueError(F"""can\'t find {path}""" )
__magic_name__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_glue.main()
_lowercase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_clm_flax.main()
_lowercase = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_summarization_flax.main()
_lowercase = get_results(__UpperCamelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def snake_case ( self : int ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_mlm_flax.main()
_lowercase = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_ta_mlm_flax.main()
_lowercase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_lowercase = 7 if get_gpu_count() > 1 else 2
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_ner.main()
_lowercase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_qa.main()
_lowercase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 497 |
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3
def rabin_karp ( pattern , text ) -> bool:
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
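# Worked rolling-hash step (alphabet_size = 256): for a length-2 pattern,
#   hash("ab") = (ord("a") * 256 + ord("b")) % modulus
# and sliding the window from "ab" to "bc" costs O(1):
#   ((hash - ord("a") * modulus_power) * 256 + ord("c")) % modulus,
# where modulus_power = 256 ** (p_len - 1) % modulus.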
def test_rabin_karp ( ) -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern , text )
    pattern = "Lue"
    assert not rabin_karp(pattern , text )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCAmelCase__ = random.Random()
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
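# The helper above (floats_list in the upstream test suite) builds a
# shape[0] x shape[1] nested list of floats drawn from the module-level
# `global_rng`, each scaled by `scale`; pass a seeded `rng` for reproducible
# fixtures.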
@require_torch
@require_torchaudio
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=400 , lowerCAmelCase_=2000 , lowerCAmelCase_=10 , lowerCAmelCase_=160 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=4000 , lowerCAmelCase_=False , lowerCAmelCase_=True , ):
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
__lowercase = feature_size
__lowercase = chunk_length
__lowercase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCAmelCase_=False , lowerCAmelCase_=False ):
def _flatten(lowerCAmelCase_ ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case ( __snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
__lowercase = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
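# Hedged usage sketch of the extractor under test (the checkpoint id below is an
# assumption, not taken from this file): one second of silence at 16 kHz in,
# padded log-mel features out.
#
#   fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
#   feats = fe([0.0] * 16000, sampling_rate=16000, return_tensors="np").input_features
#   assert feats.shape == (1, 80, 3000)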
| 321 |
| 684 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluates a postfix (reverse Polish) expression; integer division
    truncates toward zero.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
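# Note on operand order: the right operand is popped first, so
# evaluate_postfix(["5", "3", "-"]) == 2, not -2, and "/" truncates toward zero.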
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run") -> str:
    """Downloads and caches the prompt from a repo and returns it contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
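# Minimal usage sketch (requires network access to the Hugging Face Hub; the agent
# name below is an arbitrary placeholder):
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")
#   prompt = template.replace("<<task>>", "Translate this sentence to French.")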
| 684 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
| 354 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
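# Sanity check from the problem statement: 1406357289 is a known pandigital number
# with the divisibility property, so its digit tuple must pass the test:
#   assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))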
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n"
    "also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
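# For reference, a sketch of the `entropy` helper imported above, as it appears in
# the companion DeeBERT module (reproduced here as an assumption about that file):
#
#   def entropy(x):
#       """Entropy of a pre-softmax logit tensor."""
#       exp_x = torch.exp(x)
#       A = torch.sum(exp_x, dim=1)      # sum of exp(x_i)
#       B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
#       return torch.log(A) - B / A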
| 92 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it, so it is lstripped.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
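# Hedged usage sketch (checkpoint name taken from the pretrained map above):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer.build_inputs_with_special_tokens(
#       tokenizer.encode(" Hello", add_special_tokens=False)
#   )
#   # Blenderbot only appends </s>; the single-sequence format has no BOS token.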
| 684 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 257 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
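# Example invocation (all paths below are placeholders):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model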
| 684 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
 | 217 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
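# Quick self-checks (can be pasted into a REPL):
#   assert is_balanced("([]{})")
#   assert not is_balanced("([)]")
#   assert is_balanced("")  # an empty sequence is balanced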
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2,
        d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1,
        attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None,
        layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True,
        truncate_seq=True, pool_q_only=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
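# Hedged usage sketch: `num_hidden_layers` is derived from `block_sizes`, so
#
#   config = FunnelConfig(block_sizes=[2, 2, 2])
#   assert config.num_hidden_layers == 6 and config.num_blocks == 3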
| 477 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
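# Worked example: x + 2y = 3 and 2x + 5y = 6 give determinant 1*5 - 2*2 = 1,
# determinant_x = 3*5 - 6*2 = 3 and determinant_y = 1*6 - 2*3 = 0, hence
#
#   assert cramers_rule_2x2([1, 2, 3], [2, 5, 6]) == (3.0, 0.0)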
| 684 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 391 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed, print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 194 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 684 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizer with Spacy and ftfy."""

    pass
| 395 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
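
# For reference, a minimal sketch of using one of the pipelines re-exported above. The
# checkpoint name and the CUDA device are assumptions based on the public Versatile
# Diffusion release, not part of this module:
#
#   import torch
#   from diffusers import VersatileDiffusionTextToImagePipeline
#
#   pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
#   pipe = pipe.to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image = pipe("an astronaut riding a horse on mars", generator=generator).images[0]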
| 684 | 0 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place via the slowsort algorithm.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 497 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
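
# As a usage note (added for illustration, not part of the test file): the scheduler under
# test drops into any diffusers pipeline with a swappable scheduler via from_config. The
# checkpoint name below is an assumption; any compatible checkpoint follows the same pattern.
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
#   image = pipe("a photo of an astronaut", num_inference_steps=20).images[0]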
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an OpenAI GPT model.
    """

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
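
# A quick standalone illustration of the attribute_map aliasing declared above (added for
# clarity; run it as its own script, assuming transformers is installed): generic names
# resolve transparently to the GPT-specific fields.
#
#   from transformers import OpenAIGPTConfig
#
#   config = OpenAIGPTConfig(n_embd=256, n_layer=4)
#   assert config.hidden_size == 256  # resolved through attribute_map to "n_embd"
#   assert config.num_hidden_layers == 4  # resolved through attribute_map to "n_layer"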
| 321 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 684 | 0 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter

"""
Create 2nd-order IIR filters with Butterworth design.
Formulas follow the Audio EQ Cookbook:
https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
"""


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
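

# Usage sketch (added for illustration): build a 1 kHz low-pass filter for audio sampled
# at 48 kHz and run it sample-by-sample. This assumes the sibling IIRFilter class exposes
# a per-sample process() method, as in the iir_filter module imported above.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    print([lowpass.process(sample) for sample in samples])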
| 69 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 684 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol.
    """

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
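
    # Quick end-to-end demonstration (added for illustration, not part of the original
    # file): two parties exchange public keys and derive the same SHA-256 shared secret
    # using only the methods defined above.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared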
| 354 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 0 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 92 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 684 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a DistilBERT model.
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
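
# A standalone sketch of the attribute_map aliasing above (added for clarity; run it as
# its own script, assuming transformers is installed): generic configuration names
# resolve to the DistilBERT-specific fields.
#
#   from transformers import DistilBertConfig
#
#   config = DistilBertConfig(dim=512, n_layers=4)
#   assert config.hidden_size == 512  # alias for "dim"
#   assert config.num_hidden_layers == 4  # alias for "n_layers"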
| 257 |
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    This function can calculate any one of the three (voltage, current, power),
    given the other two.

    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
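
    # Worked example (added for illustration): with voltage=2 V and current=4 A, the
    # missing quantity is power = |V * I| = 8.0 W.
    print(electric_power(voltage=2, current=4, power=0))  # result(name='power', value=8.0)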
| 684 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5 tokenizer. ByT5 simply uses raw bytes in UTF-8 encoding.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
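
# A byte-level round-trip sketch (added for illustration; run it as its own script,
# assuming transformers is installed and exposes this class as ByT5Tokenizer): every
# UTF-8 byte maps to one token id, offset by the three special tokens, and decoding
# restores the original text.
#
#   from transformers import ByT5Tokenizer
#
#   tokenizer = ByT5Tokenizer()
#   ids = tokenizer("hello").input_ids  # [107, 104, 111, 111, 114, 1] -- five bytes plus </s>
#   assert tokenizer.decode(ids, skip_special_tokens=True) == "hello"
| 217 |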
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 684 | 0 |