from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
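
# A minimal usage sketch for the Matrix class above (illustrative, not part of
# the original module); the values are worked by hand:
#
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()    # int(1*4 - 2*3) == -2
#     m.is_invertable()  # True, since the determinant is non-zero
#     m.identity().rows  # [[1, 0], [0, 1]]
#     (m + m).rows       # [[2, 4], [6, 8]]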
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class A_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( snake_case_ : List[str] ) ->str:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCAmelCase = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :int ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =pipeline(
'document-question-answering' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
lowerCamelCase__ : Tuple =INVOICE_URL
lowerCamelCase__ : Optional[Any] =list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
lowerCamelCase__ : Optional[Any] ='What is the placebo?'
lowerCamelCase__ : List[str] =[
{
'image': load_image(lowerCamelCase_ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple ):
"""simple docstring"""
lowerCamelCase__ : List[str] =dqa_pipeline(lowerCamelCase_ , top_k=2 )
self.assertEqual(
lowerCamelCase_ , [
[
{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ ), 'start': ANY(lowerCamelCase_ ), 'end': ANY(lowerCamelCase_ )},
{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ ), 'start': ANY(lowerCamelCase_ ), 'end': ANY(lowerCamelCase_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowerCamelCase__ : Any =INVOICE_URL
lowerCamelCase__ : Union[str, Any] ='How many cats are there?'
lowerCamelCase__ : List[Any] =[
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
lowerCamelCase__ : Dict =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
lowerCamelCase__ : int =dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
lowerCamelCase__ : str ='./tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCamelCase__ : Union[str, Any] =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
# We can optionnally pass directly the words and bounding boxes
lowerCamelCase__ : str ='./tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : Tuple =[]
lowerCamelCase__ : Tuple =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , words=lowerCamelCase_ , boxes=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : int =pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowerCamelCase__ : Dict =INVOICE_URL
lowerCamelCase__ : int ='What is the invoice number?'
lowerCamelCase__ : Union[str, Any] =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase__ : Optional[int] =dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase__ : List[str] =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : int =pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowerCamelCase__ : Tuple =INVOICE_URL
lowerCamelCase__ : Any ='What is the invoice number?'
lowerCamelCase__ : Union[str, Any] =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase__ : List[Any] =dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase__ : List[Any] =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : int =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCamelCase_ , revision='3dc6de3' , )
lowerCamelCase__ : int =INVOICE_URL
lowerCamelCase__ : Tuple ='What is the invoice number?'
lowerCamelCase__ : Optional[Any] =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCamelCase__ : Optional[int] =dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCamelCase__ : Optional[Any] =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
lowerCamelCase__ : Tuple =list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
# This model should also work if `image` is set to None
lowerCamelCase__ : Optional[int] =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Any =pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCamelCase_ , revision='3dc6de3' , max_seq_len=50 , )
lowerCamelCase__ : Dict =INVOICE_URL
lowerCamelCase__ : Optional[Any] ='What is the invoice number?'
lowerCamelCase__ : Tuple =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCamelCase__ : Tuple =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
lowerCamelCase__ : str =list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
# This model should also work if `image` is set to None
lowerCamelCase__ : Union[str, Any] =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowerCamelCase__ : Union[str, Any] =INVOICE_URL
lowerCamelCase__ : Union[str, Any] ='What is the invoice number?'
lowerCamelCase__ : Union[str, Any] =dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
pass | 126 | 0 |
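
# A minimal usage sketch of the pipeline exercised by the tests above; the
# model name and expected answer are taken from the slow tests, and exact
# scores depend on the checkpoint:
#
#     from transformers import pipeline
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
#     # -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}]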
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        ' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
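
# A minimal translation sketch for the checkpoint tested above (an illustrative
# sketch of the standard NLLB generation pattern, not part of the test suite):
#
#     from transformers import AutoModelForSeq2SeqLM
#
#     tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#     batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#     out = model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ron_Latn"])
#     tok.batch_decode(out, skip_special_tokens=True)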
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_UpperCamelCase: Dict = logging.get_logger(__name__)
_UpperCamelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_UpperCamelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__ ( lowerCamelCase__ ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LEDTokenizer
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[Any], lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : List[str]=None, lowerCAmelCase : Tuple=None, lowerCAmelCase : Dict="replace", lowerCAmelCase : int="<s>", lowerCAmelCase : Any="</s>", lowerCAmelCase : Optional[Any]="</s>", lowerCAmelCase : Optional[Any]="<s>", lowerCAmelCase : Optional[Any]="<unk>", lowerCAmelCase : List[str]="<pad>", lowerCAmelCase : int="<mask>", lowerCAmelCase : int=False, lowerCAmelCase : str=True, **lowerCAmelCase : Tuple, ) -> Optional[Any]:
super().__init__(
__snake_case, __snake_case, tokenizer_file=__snake_case, errors=__snake_case, bos_token=__snake_case, eos_token=__snake_case, sep_token=__snake_case, cls_token=__snake_case, unk_token=__snake_case, pad_token=__snake_case, mask_token=__snake_case, add_prefix_space=__snake_case, trim_offsets=__snake_case, **__snake_case, )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', __snake_case ) != add_prefix_space:
lowercase : List[Any] = getattr(__snake_case, pre_tok_state.pop('type' ) )
lowercase : Optional[Any] = add_prefix_space
lowercase : Optional[Any] = pre_tok_class(**__snake_case )
lowercase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : Dict = 'post_processor'
lowercase : int = getattr(self.backend_tokenizer, __snake_case, __snake_case )
if tokenizer_component_instance:
lowercase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Any = tuple(state['sep'] )
if "cls" in state:
lowercase : Any = tuple(state['cls'] )
lowercase : Optional[Any] = False
if state.get('add_prefix_space', __snake_case ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Optional[Any] = True
if state.get('trim_offsets', __snake_case ) != trim_offsets:
lowercase : List[Any] = trim_offsets
lowercase : Union[str, Any] = True
if changes_to_apply:
lowercase : int = getattr(__snake_case, state.pop('type' ) )
lowercase : List[Any] = component_class(**__snake_case )
setattr(self.backend_tokenizer, __snake_case, __snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase ( self : Dict ) -> Dict:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : Dict, lowerCAmelCase : List[str] ) -> Union[str, Any]:
lowercase : Tuple = AddedToken(__snake_case, lstrip=__snake_case, rstrip=__snake_case ) if isinstance(__snake_case, __snake_case ) else value
lowercase : Optional[int] = value
def lowercase ( self : Optional[Any], *lowerCAmelCase : Any, **lowerCAmelCase : Union[str, Any] ) -> List[str]:
lowercase : Dict = kwargs.get('is_split_into_words', __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__snake_case, **__snake_case )
def lowercase ( self : Union[str, Any], *lowerCAmelCase : Optional[int], **lowerCAmelCase : List[str] ) -> int:
lowercase : Optional[int] = kwargs.get('is_split_into_words', __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__snake_case, **__snake_case )
def lowercase ( self : Dict, lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> List[Any]:
lowercase : Union[str, Any] = self._tokenizer.model.save(__snake_case, name=__snake_case )
return tuple(__snake_case )
def lowercase ( self : Union[str, Any], lowerCAmelCase : str, lowerCAmelCase : int=None ) -> str:
lowercase : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase ( self : Optional[int], lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> Tuple:
lowercase : int = [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : List[str], lowerCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCAmelCase : Optional[int] = None, lowerCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[bool] = None, ) -> Tuple:
lowercase : Optional[Any] = super()._pad(
encoded_inputs=__snake_case, max_length=__snake_case, padding_strategy=__snake_case, pad_to_multiple_of=__snake_case, return_attention_mask=__snake_case, )
# Load from model defaults
if return_attention_mask is None:
lowercase : str = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
lowercase : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Union[str, Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
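
# Illustrative sketch of what the `_pad` override above buys you: when padding,
# a user-supplied `global_attention_mask` is kept aligned with `input_ids`,
# padded with -1 (meaning "local attention") rather than 0:
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short text", "a somewhat longer input text"])
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     padded = tok.pad(enc, padding="longest")
#     # every row of padded["global_attention_mask"] now matches input_ids in length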
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ ={"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ =["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ =["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
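
# The lazy structure above ultimately exposes `SpeechEncoderDecoderModel`; a
# minimal composition sketch (the two checkpoint names are illustrative
# assumptions, not mandated by this module):
#
#     from transformers import SpeechEncoderDecoderModel
#
#     model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
#         "facebook/wav2vec2-base-960h", "bert-base-uncased"
#     )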
"""simple docstring"""
UpperCamelCase_ =[
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (row, column) coordinates in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # First step: replace each letter by its coordinates in the square
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Second step: read the coordinates row by row and regroup them in pairs
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Invert the encoding: write each letter's coordinate pair out sequentially...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # ...then read the flat sequence back as two rows of coordinates
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
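
# Quick usage sketch (illustrative): the cipher round-trips any message that
# contains no spaces and no "j" (encode() maps "j" to "i"):
#
#     cipher = BifidCipher()
#     secret = cipher.encode("testmessage")
#     assert cipher.decode(secret) == "testmessage"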
"""simple docstring"""
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
def count_of_possible_combinations(SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
UpperCamelCase : Dict = sum(
count_of_possible_combinations_with_dp_array(target - item , SCREAMING_SNAKE_CASE_ )
for item in array )
UpperCamelCase : Union[str, Any] = answer
return answer
UpperCamelCase : str = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : Tuple = [0] * (target + 1)
UpperCamelCase : List[Any] = 1
for i in range(1 , target + 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Optional[int] = 5
__UpperCAmelCase : Any = [1, 2, 5]
print(combination_sum_iv(n, array, target))
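
# Worked check for the demo values above: with array = [1, 2, 5] and target = 5,
# the recurrence dp[i] = sum(dp[i - a] for a in array if i >= a) fills
# dp = [1, 1, 2, 3, 5, 9], so all three variants return 9 ordered combinations.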
import math


def is_prime(number: int) -> bool:
    """Primality test by trial division over odd candidates up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Search for the nearest prime at or above `factor * value` (or below, with
    desc=True); if the starting value is itself prime, restart from the next integer."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 315 | 0 |
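A quick sanity check of the two helpers above against a brute-force reference, assuming the restored names is_prime and next_prime:

def brute_force_is_prime(n: int) -> bool:
    return n >= 2 and all(n % d for d in range(2, n))

assert all(is_prime(n) == brute_force_is_prime(n) for n in range(100))
assert next_prime(8) == 11   # 9 and 10 are composite
assert next_prime(13) == 17  # 13 is prime, so the search restarts above it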
import math
def res(x: int, y: int) -> float:
    """Return a quantity that orders x**y values: y * log10(x) for nonzero inputs."""
    if 0 not in (x, y):
        # We use the identity log10(x**y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 328 |
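Why logarithms: comparing x1**y1 with x2**y2 directly can mean materializing enormous integers, while comparing y * log10(x) stays in floating point. A worked check:

import math

lhs = 1000 * math.log10(2)  # log10(2**1000) ≈ 301.03
rhs = 600 * math.log10(3)   # log10(3**600)  ≈ 286.27
assert lhs > rhs            # hence 2**1000 > 3**600
assert 2**1000 > 3**600     # cross-check with exact integer arithmetic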
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`, returning the random draws so runs can be compared"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Tuple:
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
        # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328 | 1 |
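The test file above repeatedly exercises one pattern: prepare the training objects, save_state() into automatically numbered checkpoint folders, then load_state() to restore bit-identical training state. A stripped-down sketch of that round trip; the path is a placeholder:

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

accelerator = Accelerator(
    project_dir="/tmp/ckpt_demo",  # placeholder path
    project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # writes /tmp/ckpt_demo/checkpoints/checkpoint_0
# ... train for a while, or deliberately perturb the weights ...
accelerator.load_state("/tmp/ckpt_demo/checkpoints/checkpoint_0")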
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : str ={
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
__lowerCAmelCase : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
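The module above is an instance of the lazy-import layout: a plain dict maps submodules to exported names, optional backends extend the dict only when importable, and _LazyModule defers the actual imports until attribute access. A skeleton of the pattern; the "foo" names are placeholders, and _LazyModule is a private transformers utility whose exact signature may change:

from typing import TYPE_CHECKING

_import_structure = {"configuration_foo": ["FooConfig"]}  # placeholder names

try:
    import torch  # noqa: F401
except ImportError:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]  # only exported when torch exists

if TYPE_CHECKING:
    pass  # a type checker would import the real names here
else:
    import sys

    from transformers.utils import _LazyModule

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)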
| 32 | """simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # background: non-positive logits
        array[array > 0] = 1   # foreground: positive logits
        return Image.fromarray((array * 255).astype(np.uint8))
| 59 |
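For reference, the same segmentation can be driven without the tool wrapper; a sketch using the public CLIPSeg classes, with a placeholder image path:

import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("cat.jpg")  # placeholder input image
inputs = processor(text=["cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = (logits > 0).squeeze().to(torch.uint8) * 255  # threshold at 0, as decode() does above
mask_image = Image.fromarray(mask.numpy())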
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
'''simple docstring'''
snake_case : int = tempfile.mkdtemp()
# fmt: off
snake_case : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
snake_case : int = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
snake_case : Optional[Any] = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : str ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : List[str] ) -> int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.get_tokenizer()
snake_case : Optional[Any] = self.get_image_processor()
snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Tuple = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : str = image_processor(snake_case__ , return_tensors="np" )
snake_case : Any = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Tuple = "lower newer"
snake_case : Tuple = processor(text=snake_case__ )
snake_case : Union[str, Any] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = self.get_image_processor()
snake_case : Dict = self.get_tokenizer()
snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : Dict = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = self.get_image_processor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : List[Any] = processor.batch_decode(snake_case__ )
snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = "lower newer"
snake_case : List[Any] = self.prepare_image_inputs()
snake_case : Tuple = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 59 | 1 |
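The contract these tests pin down is that a composite processor persists both of its parts in one directory, so from_pretrained restores them together. A minimal round trip; the path is a placeholder and the public bert-base-uncased vocab stands in for the toy one built in setUp:

from transformers import BertTokenizer, VisionTextDualEncoderProcessor, ViTImageProcessor

processor = VisionTextDualEncoderProcessor(
    image_processor=ViTImageProcessor(),
    tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
)
processor.save_pretrained("/tmp/processor_demo")  # placeholder path
restored = VisionTextDualEncoderProcessor.from_pretrained("/tmp/processor_demo")
assert restored.tokenizer.get_vocab() == processor.tokenizer.get_vocab()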
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__: Optional[Any] = logging.get_logger(__name__)
a__: List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''nat'''
__SCREAMING_SNAKE_CASE = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 366 |
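The one derived quantity in the config above is the channel width after the final stage, which doubles once per stage beyond the first:

# hidden_size as computed in __init__: with embed_dim=64 and four stages,
# the final channel width is 64 * 2**3 = 512
embed_dim, depths = 64, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512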
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: List[Any] = logging.get_logger(__name__)
a__: Optional[Any] = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''unispeech'''
def __init__( self,__lowerCamelCase=32,__lowerCamelCase=768,__lowerCamelCase=12,__lowerCamelCase=12,__lowerCamelCase=3072,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.02,__lowerCamelCase=1E-5,__lowerCamelCase="group",__lowerCamelCase="gelu",__lowerCamelCase=(512, 512, 512, 512, 512, 512, 512),__lowerCamelCase=(5, 2, 2, 2, 2, 2, 2),__lowerCamelCase=(10, 3, 3, 3, 3, 2, 2),__lowerCamelCase=False,__lowerCamelCase=128,__lowerCamelCase=16,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=0.05,__lowerCamelCase=10,__lowerCamelCase=2,__lowerCamelCase=0.0,__lowerCamelCase=10,__lowerCamelCase=0,__lowerCamelCase=320,__lowerCamelCase=2,__lowerCamelCase=0.1,__lowerCamelCase=100,__lowerCamelCase=256,__lowerCamelCase=256,__lowerCamelCase=0.1,__lowerCamelCase="mean",__lowerCamelCase=False,__lowerCamelCase=False,__lowerCamelCase=256,__lowerCamelCase=80,__lowerCamelCase=0,__lowerCamelCase=1,__lowerCamelCase=2,__lowerCamelCase=0.5,**__lowerCamelCase,):
super().__init__(**__lowerCamelCase,pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase )
A__ = hidden_size
A__ = feat_extract_norm
A__ = feat_extract_activation
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = num_ctc_classes
A__ = vocab_size
A__ = do_stable_layer_norm
A__ = use_weighted_layer_sum
A__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = apply_spec_augment
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A__ = num_codevectors_per_group
A__ = num_codevector_groups
A__ = contrastive_logits_temperature
A__ = feat_quantizer_dropout
A__ = num_negatives
A__ = codevector_dim
A__ = proj_codevector_dim
A__ = diversity_loss_weight
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# pretraining loss
A__ = replace_prob
@property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 39 | 0 |
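The closing property collapses the feature-extractor strides into the model's overall downsampling factor; with the default strides that is 320 waveform samples per encoder output frame:

import functools
import operator

# the defaults stride by 5 once and by 2 six times: 5 * 2**6 = 320
conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320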
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz
    chain, memoizing chain lengths in `counters` so shared suffixes are counted once."""
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 128 |
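A quick check of the memoized search against an un-memoized reference, assuming the restored solution above; below 100 the longest chain famously starts at 97:

def chain_length(n: int) -> int:
    # un-memoized reference: count the terms until the chain reaches 1
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

assert max(range(2, 100), key=chain_length) == 97
assert solution(100) == 97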
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("""num_inference_steps""", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
def _lowerCamelCase ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config(**snake_case__ )
UpperCamelCase_ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
UpperCamelCase_ = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
def _lowerCamelCase ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
UpperCamelCase_ = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(**snake_case__ )
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = 10
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(snake_case__ , snake_case__ )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(snake_case__ , snake_case__ )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
UpperCamelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase_ = dummy_past_residuals[:]
UpperCamelCase_ = scheduler.timesteps[5]
UpperCamelCase_ = scheduler.timesteps[6]
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ , time_step=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case__ , time_step=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.full_loop()
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_mean.item() - 2_540_529) < 10
| 128 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_no_op(self):  # placeholder kept from the original; intentionally empty
        pass
| 368 |
"""simple docstring"""
from itertools import count
def lowerCamelCase_ (UpperCamelCase__ : int = 50 ):
_UpperCAmelCase : Tuple = [1] * min_block_length
for n in count(UpperCamelCase__ ):
fill_count_functions.append(1 )
for block_length in range(UpperCamelCase__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
| 68 | 0 |
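The recurrence above can be cross-checked against the worked example in Project Euler problem 114, which states that a seven-unit row with minimum block length three admits exactly seventeen fills. A self-contained restatement of the same recurrence:

def fill_count(min_block_length: int, row_length: int) -> int:
    counts = [1] * min_block_length  # rows too short for any block: one fill each
    for n in range(min_block_length, row_length + 1):
        counts.append(1)  # the all-black row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                counts[n] += counts[n - block_start - block_length - 1]
            counts[n] += 1  # the block flush against the right edge
    return counts[row_length]

assert fill_count(3, 7) == 17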
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
_UpperCAmelCase = f'{src_lang}-{tgt_lang}'
_UpperCAmelCase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__lowercase , exist_ok=__lowercase )
_UpperCAmelCase = os.path.join(__lowercase , "README.md" )
print(f'Generating {path}' )
with open(__lowercase , "w" , encoding="utf-8" ) as f:
f.write(__lowercase )
# make sure we are under the root of the project
__SCREAMING_SNAKE_CASE :Optional[Any] = Path(__file__).resolve().parent.parent.parent
__SCREAMING_SNAKE_CASE :Any = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = model_name.split('''-''')
__SCREAMING_SNAKE_CASE :List[str] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
self.resolver.convert_models(['heb-eng'] )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ) -> Any:
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
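# A toy illustration of the split above (an exposition-only sketch, not part of the
# original converter): a fused (3 * d, d) qkv weight splits into three (d, d) blocks
# that the metaseq layout stores in K, V, Q order.
def _demo_qkv_split(d: int = 4) -> None:
    fused = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
    k, v, q = torch.split(fused, d, dim=0)
    assert k.shape == v.shape == q.shape == (d, d)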
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ) -> Union[str, Any]:
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 360 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor , tokenizer ):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
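# Minimal usage sketch (illustrative; the checkpoint name below is a public BLIP
# captioning checkpoint, but its availability is an assumption of this sketch, not
# something this module guarantees):
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")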
| 301 | 0 |
def binary_insertion_sort( collection: list ) -> list:
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
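# Worked example (illustrative check, not in the original file): duplicates are kept
# and the prefix collection[: i + 1] stays sorted after every outer iteration.
assert binary_insertion_sort([5, 2, 4, 2]) == [2, 2, 4, 5]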
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
| 32 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines: List[str] ) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(R'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
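# Illustrative property (an assumption added for exposition, not part of the original
# module): the hash ignores comment-only and empty lines, so these two sources match.
assert _hash_python_lines(["x = 1", "# a comment", ""]) == _hash_python_lines(["x = 1"])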
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 32 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": OpenLlamaModel,
            """text-classification""": OpenLlamaForSequenceClassification,
            """text-generation""": OpenLlamaForCausalLM,
            """zero-shot""": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base( self ):
        pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
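# A sketch of the "linear" RoPE scaling exercised above (exposition only; the real
# logic lives in the model's rotary-embedding module): position ids are divided by
# the factor so longer inputs reuse the trained frequency range.
def _linear_scaled_positions(seq_len: int, factor: float = 10.0):
    return torch.arange(seq_len, dtype=torch.float32) / factor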
| 353 |
'''simple docstring'''
from __future__ import annotations
def find_max( nums: list[int | float] , left: int , right: int ) -> int | float:
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 280 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self , text: str , pattern: str ):
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern(self , char: str ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self , current_pos: int ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self ):
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
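# Expected behaviour for the inputs above (illustrative check, not in the original
# script): "AB" occurs at indices 0 and 3 of "ABAABA".
assert positions == [0, 3]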
| 216 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False )-> Union[str, Any]:
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False )-> List[str]:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict )-> Optional[Any]:
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new )-> int:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( )-> str:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True )-> List[str]:
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39 | 0 |
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_A = open # noqa: we just need to have a builtin inside this module to test it properly
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig( PretrainedConfig ):
    model_type = 'gpt_neox'
    def __init__( self, vocab_size=5_0432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=2_4576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=1_0000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1E-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''', None )
        rope_scaling_factor = self.rope_scaling.get('''factor''', None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 167 | 0 |
def pancake_sort( arr: list ) -> list:
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the prefix of length cur, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
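# Worked example (illustrative check, not in the original file): each pass flips the
# current maximum to the front, then flips it into its final slot at index cur - 1.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]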
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
| 38 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """simple docstring"""
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        '''simple docstring'''
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
    src_text = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
    model_name = 'facebook/blenderbot_small-90M'
@cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
@slow
    def test_90_generation_from_long_input( self ):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 68 | 0 |
from __future__ import annotations
_lowerCAmelCase : str = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex: str ) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f'->{target_vertex}'
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 363 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    model_type = 'yolos'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 12
| 308 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def a_ ( _A , _A="shi-labs/oneformer_demo" ) -> Optional[Any]:
"""simple docstring"""
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
snake_case__ = json.load(_lowerCAmelCase )
snake_case__ = {}
snake_case__ = []
snake_case__ = []
for key, info in class_info.items():
snake_case__ = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
snake_case__ = thing_ids
snake_case__ = class_names
return metadata
class OneFormerImageProcessorTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=2_55 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processing_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case_ , 'image_std' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
self.assertTrue(hasattr(snake_case_ , 'ignore_index' ) )
self.assertTrue(hasattr(snake_case_ , 'class_info_file' ) )
self.assertTrue(hasattr(snake_case_ , 'num_text' ) )
self.assertTrue(hasattr(snake_case_ , 'repo_path' ) )
self.assertTrue(hasattr(snake_case_ , 'metadata' ) )
self.assertTrue(hasattr(snake_case_ , 'do_reduce_labels' ) )
def lowerCAmelCase_ ( self: Dict ) -> List[str]:
pass
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
# Initialize image_processor
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
snake_case__ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
snake_case__ = image_processor(
snake_case_ , ['semantic'] * len(snake_case_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]:
# Initialize image_processor
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
snake_case__ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
snake_case__ = image_processor(
snake_case_ , ['semantic'] * len(snake_case_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
# Initialize image_processor
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
snake_case__ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
snake_case__ = image_processor(
snake_case_ , ['semantic'] * len(snake_case_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: str=False , UpperCamelCase: int=False , UpperCamelCase: Union[str, Any]="np" ) -> Optional[Any]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
snake_case__ = self.image_processing_tester.num_labels
snake_case__ = None
snake_case__ = None
snake_case__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
snake_case__ = num_labels
if is_instance_map:
snake_case__ = list(range(snake_case_ ) ) * 2
snake_case__ = dict(enumerate(snake_case_ ) )
snake_case__ = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
snake_case__ = [Image.fromarray(snake_case_ ) for annotation in annotations]
snake_case__ = image_processor(
snake_case_ , ['semantic'] * len(snake_case_ ) , snake_case_ , return_tensors='pt' , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]:
def common(UpperCamelCase: Dict=False , UpperCamelCase: Optional[Any]=None ):
snake_case__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
snake_case__ = inputs['mask_labels']
snake_case__ = inputs['class_labels']
snake_case__ = inputs['pixel_values']
snake_case__ = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type='pil' )
common(is_instance_map=snake_case_ , segmentation_type='pil' )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]:
snake_case__ = np.zeros((20, 50) )
snake_case__[0, 20:] = 1
snake_case__[1, :15] = 1
snake_case__[5, :10] = 1
snake_case__ = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]:
snake_case__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
snake_case__ = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case__ = image_processor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
snake_case__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
snake_case__ = image_processor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCAmelCase_ ( self: int ) -> Union[str, Any]:
snake_case__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
snake_case__ = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case__ = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , snake_case_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
snake_case__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
snake_case__ = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case__ = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , snake_case_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 307 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
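# Quick sanity check (a sketch, not part of the original module):
#     is_prime(13) -> True, is_prime(15) -> False
#     solution(0.5) -> side length of the first spiral layer whose diagonal
#     prime ratio drops below 50%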
| 301 | 0 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
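# Why this works (linearity of expectation): the expected number of distinct
# colours among 20 drawn balls is NUM_COLOURS times the probability that one
# fixed colour shows up at least once, i.e. 7 * (1 - C(60, 20) / C(70, 20)).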
| 366 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Dict = (UniPCMultistepScheduler,)
lowercase__ : Optional[int] = (("""num_inference_steps""", 25),)
def __snake_case( self : List[str] , **_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : List[str] , _UpperCamelCase : Dict=0 , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sample, sample
for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Any , _UpperCamelCase : Union[str, Any]=0 , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : List[str] , _UpperCamelCase : Tuple=None , **_UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase , "set_timesteps" ):
SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=_UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = self.full_loop(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
self.check_over_configs(lower_order_final=_UpperCamelCase )
self.check_over_configs(lower_order_final=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_UpperCamelCase , time_step=0 )
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop()
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def __snake_case( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(thresholding=_UpperCamelCase , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
assert sample.dtype == torch.float16
def __snake_case( self : List[str] , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
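# A standalone sketch of the sampling loop these tests exercise (assumes
# diffusers is installed; the random tensor stands in for a real UNet output):
#
#     import torch
#     from diffusers import UniPCMultistepScheduler
#
#     scheduler = UniPCMultistepScheduler(num_train_timesteps=1_000, solver_order=2, solver_type="bh2")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample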
| 206 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 280 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__a = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = '''https://pypi.org/pypi/diffusers/json'''
UpperCAmelCase_ : Any = json.loads(request.urlopen(_lowercase ).read() )['''releases'''].keys()
return sorted(_lowercase , key=lambda _lowercase : version.Version(_lowercase ) )
def lowerCamelCase__ ( ):
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : Optional[int] = Path(_lowercase ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
init_hf_modules()
UpperCAmelCase_ : Optional[int] = Path(_lowercase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : Optional[Any] = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : int = f.read()
# Imports of the form `import .xxx`
UpperCAmelCase_ : Union[str, Any] = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , _lowercase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , _lowercase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowercase ) )
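# Example: for a module file containing "import .utils" and
# "from .pipeline_utils import X" this returns ['utils', 'pipeline_utils']
# (order not guaranteed, since the matches are de-duplicated through a set).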
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Optional[Any] = [module_file]
UpperCAmelCase_ : Union[str, Any] = []
# Let's recurse through all relative imports
while not no_change:
UpperCAmelCase_ : Any = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowercase ) )
UpperCAmelCase_ : str = Path(_lowercase ).parent
UpperCAmelCase_ : Tuple = [str(module_path / m ) for m in new_imports]
UpperCAmelCase_ : Tuple = [f for f in new_import_files if f not in all_relative_imports]
UpperCAmelCase_ : Dict = [f'''{f}.py''' for f in new_import_files]
UpperCAmelCase_ : Optional[int] = len(_lowercase ) == 0
all_relative_imports.extend(_lowercase )
return all_relative_imports
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with open(_lowercase , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : int = f.read()
# Imports of the form `import xxx`
UpperCAmelCase_ : List[Any] = re.findall(r'''^\s*import\s+(\S+)\s*$''' , _lowercase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , _lowercase , flags=re.MULTILINE )
# Only keep the top-level module
UpperCAmelCase_ : str = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
UpperCAmelCase_ : Optional[Any] = list(set(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(_lowercase )
except ImportError:
missing_packages.append(_lowercase )
if len(_lowercase ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
f'''{", ".join(_lowercase )}. Run `pip install {" ".join(_lowercase )}`''' )
return get_relative_imports(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = module_path.replace(os.path.sep , '''.''' )
UpperCAmelCase_ : int = importlib.import_module(_lowercase )
if class_name is None:
return find_pipeline_class(_lowercase )
return getattr(_lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
from ..pipelines import DiffusionPipeline
UpperCAmelCase_ : str = dict(inspect.getmembers(_lowercase , inspect.isclass ) )
UpperCAmelCase_ : Optional[int] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowercase )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
f''' {loaded_module}.''' )
UpperCAmelCase_ : Optional[int] = cls
return pipeline_class
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(_lowercase )
UpperCAmelCase_ : Optional[int] = os.path.join(_lowercase , _lowercase )
if os.path.isfile(_lowercase ):
UpperCAmelCase_ : Union[str, Any] = module_file_or_url
UpperCAmelCase_ : Optional[int] = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
UpperCAmelCase_ : Any = get_diffusers_versions()
# cut ".dev0"
UpperCAmelCase_ : str = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
UpperCAmelCase_ : Optional[Any] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(f'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
UpperCAmelCase_ : Optional[int] = f'''v{revision}'''
elif revision == "main":
UpperCAmelCase_ : Optional[Any] = revision
else:
raise ValueError(
f'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
f''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
UpperCAmelCase_ : Optional[int] = COMMUNITY_PIPELINES_URL.format(revision=_lowercase , pipeline=_lowercase )
try:
UpperCAmelCase_ : Union[str, Any] = cached_download(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , )
UpperCAmelCase_ : List[Any] = '''git'''
UpperCAmelCase_ : List[Any] = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
UpperCAmelCase_ : Union[str, Any] = hf_hub_download(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , )
UpperCAmelCase_ : Tuple = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
UpperCAmelCase_ : List[Any] = check_imports(_lowercase )
# Now we move the module inside our cached dynamic modules.
UpperCAmelCase_ : Optional[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowercase )
UpperCAmelCase_ : Union[str, Any] = Path(_lowercase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowercase , submodule_path / module_file )
for module_needed in modules_needed:
UpperCAmelCase_ : Tuple = f'''{module_needed}.py'''
shutil.copy(os.path.join(_lowercase , _lowercase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : str = use_auth_token
elif use_auth_token is True:
UpperCAmelCase_ : Optional[int] = HfFolder.get_token()
else:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = model_info(_lowercase , revision=_lowercase , token=_lowercase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCAmelCase_ : int = submodule_path / commit_hash
UpperCAmelCase_ : Any = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowercase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowercase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowercase , f'''{module_needed}.py''' , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
return os.path.join(_lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = get_cached_module_file(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
return get_class_in_module(_lowercase , final_module.replace('''.py''' , '''''' ) ) | 235 |
from __future__ import annotations
from fractions import Fraction
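# Project Euler 33 (digit-cancelling fractions): find the four non-trivial
# two-digit fractions, like 49/98, that keep their value after "cancelling"
# a shared digit, and return the denominator of their product in lowest terms.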
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution()) | 235 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 63 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {'vocab_file': 'spiece.model'}
_lowerCamelCase : Optional[int] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
_lowerCamelCase : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Tuple = 1
_lowerCamelCase : int = 2
_lowerCamelCase : Dict = 3
_lowerCamelCase : Union[str, Any] = 4
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Any = """left"""
def __init__( self : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : str=False , _lowerCamelCase : Optional[Any]="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Union[str, Any]="<unk>" , _lowerCamelCase : List[Any]="<sep>" , _lowerCamelCase : str="<pad>" , _lowerCamelCase : Dict="<cls>" , _lowerCamelCase : str="<mask>" , _lowerCamelCase : Optional[int]=["<eop>", "<eod>"] , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
A_ : Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : str = 3
A_ : Union[str, Any] = do_lower_case
A_ : Tuple = remove_space
A_ : int = keep_accents
A_ : Optional[Any] = vocab_file
A_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def a_ ( self : int ):
"""simple docstring"""
return len(self.sp_model )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
"""simple docstring"""
A_ : str = self.__dict__.copy()
A_ : Tuple = None
return state
def __setstate__( self : Tuple , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : List[Any] = {}
A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
if self.remove_space:
A_ : str = ''' '''.join(inputs.strip().split() )
else:
A_ : Any = inputs
A_ : List[str] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
A_ : Any = unicodedata.normalize('''NFKD''' , _lowerCamelCase )
A_ : List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(_lowerCamelCase )] )
if self.do_lower_case:
A_ : str = outputs.lower()
return outputs
def a_ ( self : List[str] , _lowerCamelCase : str ):
"""simple docstring"""
A_ : str = self.preprocess_text(_lowerCamelCase )
A_ : int = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
A_ : List[Any] = []
for piece in pieces:
if len(_lowerCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
A_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : Tuple = cur_pieces[1:]
else:
A_ : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCamelCase )
else:
new_pieces.append(_lowerCamelCase )
return new_pieces
def a_ ( self : Any , _lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_lowerCamelCase )
def a_ ( self : Any , _lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(_lowerCamelCase )
def a_ ( self : List[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Any = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def a_ ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : bool = False , _lowerCamelCase : bool = None , _lowerCamelCase : bool = True , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : int = kwargs.pop('''use_source_tokenizer''' , _lowerCamelCase )
A_ : List[str] = self.convert_ids_to_tokens(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : Any = []
A_ : List[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
A_ : int = []
sub_texts.append(_lowerCamelCase )
else:
current_sub_text.append(_lowerCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A_ : Optional[int] = ''''''.join(_lowerCamelCase )
A_ : Any = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Dict = self.clean_up_tokenization(_lowerCamelCase )
return clean_text
else:
return text
def a_ ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : Optional[int] = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a_ ( self : List[str] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1, 1]
return ([0] * len(_lowerCamelCase )) + [1, 1]
def a_ ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : List[Any] = [self.sep_token_id]
A_ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a_ ( self : int , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
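# A minimal usage sketch (assumes the real XLNetTokenizer class behind the
# anonymised one above and the public "xlnet-base-cased" checkpoint):
#
#     tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
#     ids = tokenizer('Hello world')['input_ids']  # ends with <sep>, <cls>
#     text = tokenizer.decode(ids)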
| 167 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_upernet'] = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
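# With the lazy module installed in sys.modules, "from ...models.upernet
# import UperNetConfig" defers the torch-dependent imports until the
# attribute is first accessed.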
| 258 | #
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    # flock on the script's own file handle serialises prints across processes
    with open(__file__, 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
rank = dist.get_rank()
world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 258 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase: Optional[Any] = logging.get_logger(__name__)
_lowercase: Optional[int] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowercase ( _lowerCamelCase ):
"""simple docstring"""
__A = '''yolos'''
def __init__(self , lowerCamelCase_=768 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=3072 , lowerCamelCase_="gelu" , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1E-1_2 , lowerCamelCase_=[512, 864] , lowerCamelCase_=16 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=100 , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=1 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=5 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(**_A )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = num_detection_tokens
a = use_mid_position_embeddings
a = auxiliary_loss
# Hungarian matcher
a = class_cost
a = bbox_cost
a = giou_cost
# Loss coefficients
a = bbox_loss_coefficient
a = giou_loss_coefficient
a = eos_coefficient
class _lowercase ( _lowerCamelCase ):
"""simple docstring"""
__A = version.parse("1.11" )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return 1E-4
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return 12
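# A minimal usage sketch (assumes the real transformers class names behind
# the anonymised ones above):
#
#     from transformers import YolosConfig, YolosModel
#     config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#     model = YolosModel(config)  # randomly initialised weights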
| 227 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }") | 308 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
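# Expected result for the default signals (verified by hand):
#     CircularConvolution().circular_convolution() == [10, 10, 6, 14]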
if __name__ == "__main__":
    doctest.testmod() | 307 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list) | 307 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowercase: Any = NewType("DataClass", Any)
_lowercase: List[str] = NewType("DataClassType", Any)
def a( A : Dict ) -> Any:
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def a( A : str ) -> int:
"""simple docstring"""
a = {str(lowerCamelCase__ ): choice for choice in choices}
return lambda A : str_to_choice.get(lowerCamelCase__ , lowerCamelCase__ )
def a( *,
A : List[Any] = None , A : Union[str, Any] = None , A : Optional[int] = dataclasses.MISSING , A : Optional[Any] = dataclasses.MISSING , A : int = None , **A : List[str] , ) -> List[Any]:
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
a = {}
if aliases is not None:
a = aliases
if help is not None:
a = help
return dataclasses.field(metadata=lowerCamelCase__ , default=lowerCamelCase__ , default_factory=lowerCamelCase__ , **lowerCamelCase__ )
class _lowercase ( __UpperCAmelCase ):
"""simple docstring"""
__A = 42
def __init__(self , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
a = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase_ )
if dataclasses.is_dataclass(lowerCamelCase_ ):
a = [dataclass_types]
a = list(lowerCamelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase_ )
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = F'''--{field.name}'''
a = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCamelCase_ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
a = kwargs.pop("aliases" , [] )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
a = [aliases]
a = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(lowerCamelCase_ , "UnionType" ) and isinstance(lowerCamelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase_ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F''' Problem encountered in field \'{field.name}\'.''' )
if type(lowerCamelCase_ ) not in field.type.__args__:
# filter `str` in Union
a = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
a = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
a = (
field.type.__args__[0] if isinstance(lowerCamelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
a = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
a = {}
if origin_type is Literal or (isinstance(field.type , lowerCamelCase_ ) and issubclass(field.type , lowerCamelCase_ )):
if origin_type is Literal:
a = field.type.__args__
else:
a = [x.value for x in field.type]
a = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
a = field.default
else:
a = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
a = copy(lowerCamelCase_ )
# Hack because type=bool in argparse does not behave as we want.
a = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
a = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
a = default
# This tells argparse we accept 0 or 1 value after --field_name
a = """?"""
# This is the value that will get picked if we do --field_name (without value)
a = True
elif isclass(lowerCamelCase_ ) and issubclass(lowerCamelCase_ , lowerCamelCase_ ):
a = field.type.__args__[0]
a = """+"""
if field.default_factory is not dataclasses.MISSING:
a = field.default_factory()
elif field.default is dataclasses.MISSING:
a = True
else:
a = field.type
if field.default is not dataclasses.MISSING:
a = field.default
elif field.default_factory is not dataclasses.MISSING:
a = field.default_factory()
else:
a = True
parser.add_argument(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
a = False
parser.add_argument(F'''--no_{field.name}''' , action="store_false" , dest=field.name , **lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
if hasattr(lowerCamelCase_ , "_argument_group_name" ):
a = self.add_argument_group(dtype._argument_group_name )
else:
a = self
try:
a = get_type_hints(lowerCamelCase_ )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase_ ):
a = """.""".join(map(lowerCamelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(lowerCamelCase_ ):
if not field.init:
continue
a = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
a = []
if args_filename:
args_files.append(Path(lowerCamelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
a = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase_ , type=lowerCamelCase_ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
a = args_file_parser.parse_known_args(args=lowerCamelCase_ )
a = vars(lowerCamelCase_ ).get(args_file_flag.lstrip("-" ) , lowerCamelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase_ ) for p in cmd_args_file_paths] )
a = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
a = file_args + args if args is not None else file_args + sys.argv[1:]
a = self.parse_known_args(args=lowerCamelCase_ )
a = []
for dtype in self.dataclass_types:
a = {f.name for f in dataclasses.fields(lowerCamelCase_ ) if f.init}
a = {k: v for k, v in vars(lowerCamelCase_ ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase_ , lowerCamelCase_ )
a = dtype(**lowerCamelCase_ )
outputs.append(lowerCamelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = False ):
"""simple docstring"""
a = set(args.keys() )
a = []
for dtype in self.dataclass_types:
a = {f.name for f in dataclasses.fields(lowerCamelCase_ ) if f.init}
a = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
a = dtype(**lowerCamelCase_ )
outputs.append(lowerCamelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase_ )}''' )
return tuple(lowerCamelCase_ )
    def parse_json_file(self, json_file, allow_extra_keys=False):
        """Load a JSON file and instantiate the dataclasses from its contents."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        """Load a YAML file and instantiate the dataclasses from its contents."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
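

# --- Added usage sketch (not part of the original class): a minimal demo,
# assuming the class above is `transformers.HfArgumentParser` (its own error
# messages name it). Each dataclass field becomes a CLI flag, and one
# populated dataclass instance is returned per registered type.
if __name__ == "__main__":
    from dataclasses import dataclass, field

    from transformers import HfArgumentParser

    @dataclass
    class DemoArguments:
        learning_rate: float = field(default=5e-5)
        num_epochs: int = field(default=3)

    demo_parser = HfArgumentParser(DemoArguments)
    # Parse an explicit argv-style list instead of sys.argv for the demo.
    (demo_args,) = demo_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])
    print(demo_args)  # DemoArguments(learning_rate=0.0001, num_epochs=3)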
| 227 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main() | 206 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # label batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # label batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point array to the length of the longest one, using the pad value."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale coordinates from the original image size to the resized (longest-edge) size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
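
# --- Added note (not in the original file): a hand-worked example of
# `_normalize_coordinates`. For an original image of height 1764 and width
# 2646 resized so its longest edge is 1024, `_get_preprocess_shape` yields
# roughly (683, 1024), i.e. a uniform scale of 1024 / 2646 ~ 0.387, so a
# point (x=1000, y=500) maps to approximately (387, 194) in resized space.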
 | 363 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
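
# --- Added illustration (not in the original script): how a fused qkv
# projection is split above. For hidden size d the fused weight has shape
# (3d, d); rows [0:d] are the query, [d:2d] the key, and [2d:3d] the value.
# A tiny self-contained check:
#
#     import torch
#     d = 2
#     qkv = torch.arange(3 * d * d).reshape(3 * d, d)
#     q, k, v = qkv[:d, :], qkv[d : d * 2, :], qkv[-d:, :]
#     assert torch.equal(torch.cat([q, k, v], dim=0), qkv)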
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Copy/paste/tweak the original GroupViT weights into the Transformers design and verify the outputs."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312 | 0 |
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator: seed <- (multiplier * seed + increment) mod modulo."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
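
# --- Added note (not in the original file): with these Numerical Recipes
# constants (a=1664525, c=1013904223, m=2**32), the Hull-Dobell conditions
# hold (c is odd and a - 1 is divisible by 4), so the generator has full
# period m. A seeded run is reproducible:
#
#     gen = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#     assert gen.next_number() == (1664525 * 42 + 1013904223) % (2 << 31)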
| 235 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
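
# --- Added worked sketch (not in the original metric file): the headline
# BLEU number combines the n-gram precisions p_n with a brevity penalty BP.
# With uniform weights w_n = 1/N (N = max_order):
#
#     BP   = 1 if c > r else exp(1 - r / c)   # c: translation_length, r: reference_length
#     BLEU = BP * exp(sum_n w_n * log(p_n))
#
# e.g. precisions (1.0, 1.0, 1.0, 1.0) with c == r give BLEU = 1.0, matching
# the docstring example above.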
| 235 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> dict:
    """Build a circuit that measures qubit 0 and return the counts histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
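
# --- Added note (not in the original file): the circuit above applies no
# gates before measuring, so the qubit stays in |0> and the histogram is
# deterministic: {'0': 1000}. Inserting a Hadamard (circuit.h(0)) before the
# measurement would yield a roughly even split such as {'0': ~500, '1': ~500}.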
| 358 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
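
# --- Added note (not in the original file): with this `_LazyModule` pattern,
# importing the package itself is cheap; a heavy submodule is only loaded the
# first time one of its attributes is accessed, e.g.
#
#     from transformers.models.xglm import XGLMConfig  # triggers configuration_xglm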
| 19 | 0 |
"""Rabin-Karp substring search using a rolling hash."""

# Size of the alphabet (one slot per possible character value)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
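
# --- Added note (not in the original file): the rolling hash updates in O(1)
# per window shift, so the expected running time is O(len(text) + len(pattern));
# the explicit slice comparison on a hash match guards against the rare
# collisions modulo 1_000_003.
#
#     rabin_karp("needle", "finding a needle in a haystack")  # -> True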
| 258 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
    image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
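

# --- Added usage sketch (not part of the original module): assuming this class
# is registered as the "image-classification" pipeline task in transformers:
#
#     from transformers import pipeline
#
#     classifier = pipeline("image-classification")
#     classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)
#     # -> [{"score": ..., "label": "..."}, {"score": ..., "label": "..."}]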
| 258 | 1 |
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a base-10 integer to its hexadecimal string representation.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(17)
    '0x11'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    >>> decimal_to_hexadecimal(0)
    '0x0'
    """
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    if decimal == 0:
        # the digit loop below never runs for zero, so handle it explicitly
        return "0x0"
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 71 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
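

# --- Added usage sketch (not part of the original test file): the extractor
# maps raw audio to log-Mel features of shape (batch, 80, 3000) with its
# default settings, as `test_integration` above checks.
#
#     import numpy as np
#     from transformers import WhisperFeatureExtractor
#
#     fe = WhisperFeatureExtractor()
#     audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#     feats = fe(audio, sampling_rate=16000, return_tensors="np").input_features
#     print(feats.shape)  # (1, 80, 3000)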
| 307 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
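

# --- Added usage sketch (not part of the original module): the processor
# chains OCR (via the image processor, which needs pytesseract installed when
# apply_ocr=True) with tokenization:
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # includes input_ids, bbox, attention_mask, image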
| 307 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase ):
    '''simple docstring'''

    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )

    def get_image_processor(self, **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs ).image_processor

    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors='''tf''' )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors='''pt''' )['''pixel_values'''].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='''pt''' )['''pixel_values'''].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors='''tf''' )['''pixel_values'''].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract ) )
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor ) )
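# --- Editor's note: a minimal usage sketch of `SamProcessor.post_process_masks`,
# summarising what the tests above exercise. The checkpoint is the public SAM
# base checkpoint; the dummy mask shape mirrors the tests and is an
# illustrative assumption, not a requirement of the API.
def _sam_post_process_demo():
    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    low_res_masks = [np.ones((1, 3, 5, 5))]        # dummy mask logits
    original_sizes = [[1764, 2646]]                # (height, width) of the raw image
    reshaped_input_sizes = [[683, 1024]]           # size after the processor's resize step
    masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
    assert masks[0].shape == (1, 3, 1764, 2646)    # masks are upsampled to the raw image size
    return masks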
| 353 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module ):
    '''simple docstring'''

    def __init__( self, num_attention_heads = 16, attention_head_dim = 88, in_channels = None, num_layers = 1, dropout = 0.0, norm_num_groups = 32, cross_attention_dim = None, attention_bias = False, sample_size = None, num_vector_embeds = None, activation_fn = "geglu", num_embeds_ada_norm = None, ):
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward( self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict = True, ):
        """simple docstring"""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
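# --- Editor's note: a minimal sketch of the mixing rule in `forward` above,
# isolated from the transformer blocks. All tensors are random stand-ins and
# `mix_ratio` plays the role of `self.mix_ratio`; only the arithmetic is real.
def _mix_ratio_demo():
    import torch

    mix_ratio = 0.5
    input_states = torch.randn(2, 77, 32)
    # Residuals produced by the two transformers (encoded_state - input_states):
    residuals = [torch.randn(2, 77, 32), torch.randn(2, 77, 32)]
    output_states = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio)
    return output_states + input_states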
| 247 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = '''UperNetConfig'''
class UperNetConvModule(nn.Module ):
    def __init__(self , in_channels: int , out_channels: int , kernel_size: Union[int, Tuple[int, int]] , padding: Union[int, Tuple[int, int], str] = 0 , bias: bool = False , dilation: Union[int, Tuple[int, int]] = 1 , ):
        '''simple docstring'''
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()

    def forward(self , input: torch.Tensor ):
        '''simple docstring'''
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output


class UperNetPyramidPoolingBlock(nn.Module ):
    def __init__(self , pool_scale: int , in_channels: int , channels: int ):
        '''simple docstring'''
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )

    def forward(self , input: torch.Tensor ):
        '''simple docstring'''
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module ):
    def __init__(self , pool_scales: Tuple[int, ...] , in_channels: int , channels: int , align_corners: bool ):
        '''simple docstring'''
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )

    def forward(self , x: torch.Tensor ):
        '''simple docstring'''
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
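# --- Editor's note: the pyramid pooling idea above, restated as a minimal
# plain-PyTorch sketch: each scale adaptively pools the feature map, projects
# it with a 1x1 conv (freshly initialised here, purely for illustration), and
# upsamples back to the input resolution so all scales can be concatenated.
def _pyramid_pooling_sketch(features , pool_scales=(1, 2, 3, 6) , channels=8 ):
    in_channels = features.shape[1]
    outs = []
    for scale in pool_scales:
        pooled = nn.AdaptiveAvgPool2d(scale )(features )
        projected = nn.Conv2d(in_channels , channels , kernel_size=1 )(pooled )
        outs.append(
            nn.functional.interpolate(
                projected , size=features.shape[2:] , mode='''bilinear''' , align_corners=False ) )
    return torch.cat(outs , dim=1 )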
class UperNetHead(nn.Module ):
    def __init__(self , config , in_channels ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )

    def init_weights(self ):
        '''simple docstring'''
        self.apply(self._init_weights )

    def _init_weights(self , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self , inputs ):
        '''simple docstring'''
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output

    def forward(self , encoder_hidden_states: torch.Tensor ):
        '''simple docstring'''
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead(nn.Module ):
    def __init__(self , config , in_index: int = 2 , kernel_size: int = 3 , dilation: Union[int, Tuple[int, int]] = 1 ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )

    def init_weights(self ):
        '''simple docstring'''
        self.apply(self._init_weights )

    def _init_weights(self , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self , encoder_hidden_states: torch.Tensor ):
        '''simple docstring'''
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel(PreTrainedModel ):
    config_class = UperNetConfig
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights(self , module ):
        '''simple docstring'''
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self ):
        '''simple docstring'''
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    '''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel ):
    def __init__(self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward(self , pixel_values: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , output_hidden_states: Optional[bool] = None , labels: Optional[torch.Tensor] = None , return_dict: Optional[bool] = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
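# --- Editor's note: a minimal inference sketch for the model defined above,
# using the public checkpoint listed at the top of this file. The random
# pixel values stand in for the output of an image processor; the input size
# is an illustrative assumption.
def _upernet_inference_demo():
    model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
    pixel_values = torch.randn(1 , 3 , 512 , 512 )
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values )
    # Logits come back upsampled to the input resolution: (batch, num_labels, 512, 512).
    return outputs.logits.shape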
| 49 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
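# --- Editor's note: a minimal sketch of what the `_LazyModule` indirection
# above buys: importing the package only records `_import_structure`, and a
# submodule is imported the first time one of its attributes is accessed.
def _lazy_import_demo():
    import importlib

    mgp_str = importlib.import_module('transformers.models.mgp_str' )
    return mgp_str.MgpstrProcessor  # the submodule loads only on this attribute access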
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open("w" , encoding="utf-8" )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors="pt" , truncation=True , padding="longest" ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + "\n" )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )


def run_generate(verbose=True ):
    parser = argparse.ArgumentParser()
parser.add_argument("model_name" , type=UpperCamelCase_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=UpperCamelCase_ , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=UpperCamelCase_ , help="where to save summaries" )
parser.add_argument("--reference_path" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=UpperCamelCase_ , required=UpperCamelCase_ , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=UpperCamelCase_ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=UpperCamelCase_ , default=8 , required=UpperCamelCase_ , help="batch size" )
parser.add_argument(
"--n_obs" , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=UpperCamelCase_ , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F'''parsed the following generate kwargs: {parsed_args}''' )
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fp16:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
if args.reference_path is None:
return {}
# Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 328 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split , metrics , output_dir ):
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F'''  {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , F'''{split}_results.json''' ) )


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
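# --- Editor's note: a minimal sketch of the `HfArgumentParser` pattern this
# script builds on: dataclass fields become CLI flags and one populated
# instance is returned per dataclass. `DemoArguments` is a hypothetical
# stand-in, not one of the dataclasses above.
def _hf_argparser_demo():
    @dataclass
    class DemoArguments:
        model_name_or_path: str = field(default="t5-small" )
        freeze_encoder: bool = field(default=False )

    demo_parser = HfArgumentParser(DemoArguments )
    (demo_args,) = demo_parser.parse_args_into_dataclasses(args=["--freeze_encoder", "True"] )
    return demo_args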
if __name__ == "__main__":
main()
| 328 | 1 |
"""simple docstring"""
import math
def res(x , y ):
    '''simple docstring'''
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen" )
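# --- Editor's note: a quick worked example of what `res` is for: comparing
# huge powers by magnitude without ever forming them.
def _log_compare_demo():
    left = res(2, 100)   # 100 * log10(2) ≈ 30.10
    right = res(10, 30)  # 30 * log10(10) = 30.0
    assert left > right  # so 2^100 > 10^30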
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 57 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run(self ):
        raise NotImplementedError()
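# --- Editor's note: a minimal sketch of how a concrete command plugs into the
# abstract base above; the command name and behaviour are hypothetical.
class _EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("--text", type=str, default="hello")
        echo_parser.set_defaults(func=lambda args: _EchoCommand(args.text))

    def __init__(self, text):
        self._text = text

    def run(self):
        print(self._text)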
| 19 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline(Pipeline ):
    '''simple docstring'''

    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''decord''' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self , videos: Union[str, List[str]] , **kwargs ):
        return super().__call__(videos , **kwargs )

    def preprocess(self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs

    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
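# --- Editor's note: a minimal usage sketch for the pipeline defined above via
# the high-level `pipeline` factory. The checkpoint is a public VideoMAE
# model; the video path is a placeholder and the `decord` backend must be
# installed for preprocessing to work.
def _video_classification_demo():
    from transformers import pipeline

    classifier = pipeline('''video-classification''' , model='''MCG-NJU/videomae-base-finetuned-kinetics''' )
    return classifier('''path/to/video.mp4''' , top_k=3 )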
| 350 | '''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput ):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    @register_to_config
    def __init__(self , num_train_timesteps: int = 1000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type

    def scale_model_input(self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps(self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance(self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self , model_output , timestep , sample , prev_timestep = None , generator=None , return_dict = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ''' for the UnCLIPScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ''' for the UnCLIPScheduler.''' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise(self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
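# --- Editor's note: a minimal sketch of the forward-noising rule `add_noise`
# implements: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
# Shapes and the chosen timestep are illustrative assumptions.
def _add_noise_demo():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000 )
    original_samples = torch.randn(1 , 3 , 8 , 8 )
    noise = torch.randn(1 , 3 , 8 , 8 )
    timesteps = torch.tensor([500] )
    return scheduler.add_noise(original_samples , noise , timesteps )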
| 214 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ) -> np.array:
    """Solve an ODE y' = f(x, y) with Heun's method (the modified Euler method)."""
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Predictor: a plain forward-Euler step.
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
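# --- Editor's note: a minimal usage sketch for the solver above, integrating
# y' = y from x = 0 to 1 with y(0) = 1; the final value approximates e ≈ 2.718.
def _euler_modified_demo():
    ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    return ys[-1]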
| 71 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_dict_tuple_outputs_equivalent(self ):
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def test_save_load_local(self ):
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3E-3 )

    def test_save_load_optional_components(self ):
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def test_inference_batch_single_identical(self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10(self ):
        """simple docstring"""
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_ema_bedroom(self ):
        """simple docstring"""
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
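# --- Editor's note: a minimal generation sketch mirroring the slow test
# above; the checkpoint is the public one the test uses, and 50 inference
# steps is an illustrative choice.
def _ddim_generation_demo():
    unet = UNet2DModel.from_pretrained('google/ddpm-cifar10-32' )
    scheduler = DDIMScheduler()
    pipe = DDIMPipeline(unet=unet , scheduler=scheduler )
    generator = torch.manual_seed(0 )
    return pipe(generator=generator , eta=0.0 , num_inference_steps=50 , output_type='numpy' ).images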
| 71 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowercase__ : Optional[int] = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
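

# A minimal usage sketch (assumption: illustrative only; the checkpoint name is one
# commonly used with Donut and is not taken from this file):
#
#     extractor = DonutFeatureExtractor.from_pretrained("naver-clova-ix/donut-base")  # emits FutureWarning
#     processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")    # warning-free equivalent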
| 287 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
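

# A minimal sketch (assumption: illustrative only, not used by the tests below) of the
# two equivalent annotation spellings this flag gates on. The string annotations keep
# the function importable on interpreters older than 3.10.
def _pep604_example(value: "int | None" = None) -> "int | None":
    # Equivalent pre-3.10 spelling: Optional[int] (i.e. Union[int, None]).
    return value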
def list_field(default=None, metadata=None):
    """simple docstring"""
    return field(default_factory=lambda: default, metadata=metadata)
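

# Hedged note (assumption: illustrative only): dataclasses reject mutable defaults
# such as lists, so the helper above wraps them in a default_factory closure:
#
#     @dataclass
#     class _Example:                                        # hypothetical class
#         names: List[str] = list_field(default=["a", "b"])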
@dataclass
class BasicExample:
    """simple docstring"""
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """simple docstring"""
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    """simple docstring"""
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    """simple docstring"""
    foo: BasicEnum = "toto"

    def __post_init__(self):
        '''simple docstring'''
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """simple docstring"""
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        '''simple docstring'''
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """simple docstring"""
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """simple docstring"""
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """simple docstring"""
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        '''simple docstring'''
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
@dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
@dataclass
    class OptionalExamplePep604:
        """simple docstring"""
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : argparse.ArgumentParser , lowerCAmelCase__ : argparse.ArgumentParser ) -> str:
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_UpperCamelCase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''}
_UpperCamelCase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , lowerCAmelCase__ ) and yy.get('''choices''' , lowerCAmelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowerCAmelCase__ ) , yy['''type'''](lowerCAmelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--bar''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--baz''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--flag''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowerCAmelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counterpart,
        # and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowerCAmelCase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
_UpperCamelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCAmelCase__ )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : Literal["titi", "toto", 4_2] = "toto"
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowerCAmelCase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(
lowerCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowerCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ )
expected.add_argument('''--bar''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
_UpperCamelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCAmelCase__ )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , bar=lowerCAmelCase__ , baz=lowerCAmelCase__ , ces=[] , des=[] ) )
_UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--required_str''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , )
expected.add_argument('''--opt''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCamelCase = parser.parse_dict(lowerCAmelCase__ )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowerCAmelCase__ , parser.parse_dict , lowerCAmelCase__ , allow_extra_keys=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''temp_json''' )
os.mkdir(lowerCAmelCase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
            _UpperCamelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''temp_yaml''' )
os.mkdir(lowerCAmelCase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
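

# End-to-end sketch (assumption: illustrative only, mirrors the tests above):
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#     )
#     assert example.foo == 1 and example.flag is True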
| 287 | 1 |
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
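

def _window_product(digits: str, start: int, width: int = 13) -> int:
    # Plain-loop equivalent of the reduce() above (assumption: illustrative helper,
    # not part of the original solution).
    result = 1
    for char in digits[start : start + width]:
        result *= int(char)
    return result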
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
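

# Sanity check (assumption: illustrative only): exponentiation by squaring above
# agrees with Python's built-in three-argument pow(), e.g. 7**11 % 100 == 43.
assert _modexpt(7, 11, 100) == pow(7, 11, 100) == 43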
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
| 247 | 0 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """simple docstring"""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
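

# Sanity check (assumption: illustrative only): with two six-sided dice there are
# exactly six ways to roll a total of 7 out of 6**2 = 36 outcomes.
assert total_frequency_distribution(sides_number=6, dice_number=2)[7] == 6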
def solution() -> float:
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'{solution() = }')
| 371 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ = OpenAIGPTTokenizer
A__ = OpenAIGPTTokenizerFast
A__ = True
A__ = False
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any]=15 ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
lowercase__ = """This is a simple input"""
lowercase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase__ = ("""This is a simple input""", """This is a pair""")
lowercase__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A ( UpperCAmelCase__ ):
'''simple docstring'''
pass
| 146 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
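

# Sanity sketch (assumption: illustrative only): a Linear(10, 2) layer has
# 10 * 2 weights + 2 biases = 22 trainable parameters.
#
#     assert count_trainable_parameters(torch.nn.Linear(10, 2)) == 22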
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode='max', save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
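

# Usage sketch (assumption: illustrative only): for metric="rouge2" the callback above
# monitors "val_rouge2" and writes checkpoints named like "0.4321-100.ckpt"
# (the formatted val_avg_rouge2 followed by step_count).
#
#     checkpoint_cb = get_checkpoint_callback("output/", metric="rouge2")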
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
"""simple docstring"""
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        '''simple docstring'''
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 328 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''simple docstring'''
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """simple docstring"""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
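

# Usage note (assumption: illustrative only): DummyModel is a scalar linear model
# y = a * x + b, so training it on dummy_dataloaders(a=2, b=3) should drive its
# parameters toward a ≈ 2 and b ≈ 3.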
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MvpTokenizer
__magic_name__ = MvpTokenizerFast
__magic_name__ = True
__magic_name__ = filter_roberta_detectors
def a ( self ):
'''simple docstring'''
super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
    def default_tokenizer_fast(self):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Tuple = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case__ , snake_case__ )
# Test that special tokens are reset
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , snake_case__ )
self.assertIn('attention_mask' , snake_case__ )
self.assertNotIn('labels' , snake_case__ )
self.assertNotIn('decoder_attention_mask' , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Tuple = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Optional[Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ['A long paragraph for summarization.']
_lowerCAmelCase : List[Any] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , text_target=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : int = inputs['input_ids']
_lowerCAmelCase : str = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[int] = 'A, <mask> AllenNLP sentence.'
_lowerCAmelCase : Optional[int] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 25 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
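
# Format note (assumption: illustrative only): each entry above pairs an ISO 639-3
# language code with an ISO 15924 script code, e.g. "eng_Latn" (English, Latin script):
#
#     lang, script = "eng_Latn".split("_")  # -> ("eng", "Latn")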
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        Legacy mode: no prefix, suffix = [eos, src_lang_code].
        Default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting.
        Legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        Default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
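
# A minimal usage sketch, assuming this class is exposed as NllbTokenizerFast and
# that the checkpoint named below exists on the Hub (both are assumptions of this
# example, not part of this file):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # in default (non-legacy) mode, batch.input_ids starts with the eng_Latn code and ends with </s>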
| 25 | 1 |
"""
Project Euler problem 71 ("Ordered fractions"): find the numerator of the
largest reduced proper fraction a/b < numerator/denominator with b <= limit.
https://projecteuler.net/problem=71
"""


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # compare fractions by cross-multiplication: a/b > c/d  <=>  a*d > c*b
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
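
# Sanity check from the problem statement: for denominators d <= 8, the fraction
# immediately to the left of 3/7 is 2/5, so solution(3, 7, 8) should return 2.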
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 83 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 214 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigma(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 353 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of code tokens; returns None for files with too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code string by splitting on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the MinHashLSH index and attach it to an existing duplicate cluster if one is close."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Compute the MinHash of every file and group near-duplicates into clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates in a cluster,
    counting how many copies each extreme has."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in parallel; the dataset is shared through a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicate files, keeping one extreme per cluster, and return
    the filtered dataset together with the annotated duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
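
# Minimal usage sketch (the dataset name below is illustrative, not required):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)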
| 79 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate train/valid dataloaders for a simple y = a * x + b regression problem."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train for `num_epochs` and return the per-step random draws (used to compare RNG state)."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model computing y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
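
# The tests below all exercise one pattern; a minimal sketch of it (paths are
# illustrative, the API calls are real Accelerate APIs):
#
#   project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir="ckpts", project_config=project_config)
#   model, optimizer, train_dl, valid_dl = accelerator.prepare(model, optimizer, train_dl, valid_dl)
#   accelerator.save_state()                                  # writes ckpts/checkpoints/checkpoint_0
#   accelerator.load_state("ckpts/checkpoints/checkpoint_0")  # restores model/optimizer/RNG state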
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
lowerCamelCase : Optional[int] = DummyModel()
lowerCamelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase : List[Any] = dummy_dataloaders()
lowerCamelCase : int = ProjectConfiguration(total_limit=1 , project_dir=__magic_name__ , automatic_checkpoint_naming=__magic_name__ )
# Train baseline
lowerCamelCase : int = Accelerator(project_config=__magic_name__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def test_can_resume_training_with_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
lowerCamelCase : Optional[Any] = DummyModel()
lowerCamelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase : List[Any] = dummy_dataloaders()
# Train baseline
lowerCamelCase : Tuple = Accelerator()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save initial
lowerCamelCase : int = os.path.join(__magic_name__ , """initial""" )
accelerator.save_state(__magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : Union[str, Any] = model.a.item(), model.b.item()
lowerCamelCase : Dict = optimizer.state_dict()
lowerCamelCase : str = train(3 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : str = model.a.item(), model.b.item()
lowerCamelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
lowerCamelCase : Optional[Any] = DummyModel()
lowerCamelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase : Any = dummy_dataloaders()
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
accelerator.load_state(__magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : Optional[int] = model.a.item(), model.b.item()
lowerCamelCase : int = optimizer.state_dict()
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
lowerCamelCase : Tuple = train(2 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save everything
lowerCamelCase : List[Any] = os.path.join(__magic_name__ , """checkpoint""" )
accelerator.save_state(__magic_name__ )
# Load everything back in and make sure all states work
accelerator.load_state(__magic_name__ )
test_rands += train(1 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : Optional[int] = model.a.item(), model.b.item()
lowerCamelCase : str = optimizer.state_dict()
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
    def test_can_resume_training(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
lowerCamelCase : Tuple = DummyModel()
lowerCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase : Optional[int] = dummy_dataloaders()
lowerCamelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=__magic_name__ )
# Train baseline
lowerCamelCase : str = Accelerator(project_dir=__magic_name__ , project_config=__magic_name__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save initial
accelerator.save_state()
((lowerCamelCase) , (lowerCamelCase)) : Dict = model.a.item(), model.b.item()
lowerCamelCase : Optional[int] = optimizer.state_dict()
lowerCamelCase : Tuple = train(3 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : Tuple = model.a.item(), model.b.item()
lowerCamelCase : Dict = optimizer.state_dict()
# Train partially
set_seed(4_2 )
lowerCamelCase : Optional[Any] = DummyModel()
lowerCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase : List[Any] = dummy_dataloaders()
lowerCamelCase : int = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__magic_name__ )
lowerCamelCase : str = Accelerator(project_dir=__magic_name__ , project_config=__magic_name__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
accelerator.load_state(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_0""" ) )
((lowerCamelCase) , (lowerCamelCase)) : Tuple = model.a.item(), model.b.item()
lowerCamelCase : List[str] = optimizer.state_dict()
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
lowerCamelCase : Optional[Any] = train(2 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
((lowerCamelCase) , (lowerCamelCase)) : List[Any] = model.a.item(), model.b.item()
lowerCamelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
    def test_invalid_registration(self):
lowerCamelCase : List[Any] = torch.tensor([1, 2, 3] )
lowerCamelCase : Union[str, Any] = torch.tensor([2, 3, 4] )
lowerCamelCase : Union[str, Any] = DummyModel()
lowerCamelCase : Optional[int] = torch.optim.Adam(net.parameters() )
lowerCamelCase : int = Accelerator()
with self.assertRaises(__magic_name__ ) as ve:
accelerator.register_for_checkpointing(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowerCamelCase : Dict = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
    def test_with_scheduler(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
lowerCamelCase : Optional[Any] = DummyModel()
lowerCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase : Tuple = torch.optim.lr_scheduler.StepLR(__magic_name__ , step_size=1 , gamma=0.99 )
lowerCamelCase , lowerCamelCase : List[Any] = dummy_dataloaders()
lowerCamelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=__magic_name__ )
# Train baseline
lowerCamelCase : Optional[Any] = Accelerator(project_dir=__magic_name__ , project_config=__magic_name__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : int = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Save initial
accelerator.save_state()
lowerCamelCase : Dict = scheduler.state_dict()
train(3 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
self.assertNotEqual(__magic_name__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(__magic_name__ , scheduler.state_dict() )
    def test_checkpoint_deletion(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
lowerCamelCase : List[Any] = DummyModel()
lowerCamelCase : Tuple = ProjectConfiguration(automatic_checkpoint_naming=__magic_name__ , total_limit=2 )
# Train baseline
lowerCamelCase : Optional[Any] = Accelerator(project_dir=__magic_name__ , project_config=__magic_name__ )
lowerCamelCase : Optional[int] = accelerator.prepare(__magic_name__ )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__magic_name__ , """checkpoints""" , """checkpoint_10""" ) ) )
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
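
# Minimal usage sketch, instantiating a randomly initialized model from this
# config (ConditionalDetrModel is the matching model class in transformers):
#
#   config = ConditionalDetrConfig(num_queries=100)
#   # from transformers import ConditionalDetrModel
#   # model = ConditionalDetrModel(config)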
| 287 | 1 |
"""Convert BiT checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download an image of two cats to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our BiT structure."""
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
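
    # Example invocation of this conversion script (the script filename and output
    # path below are illustrative assumptions):
    #
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./bit-50 --push_to_hub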
| 13 |
"""Remove duplicate initializers from an ONNX model to shrink its file size."""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Scan an ONNX model for duplicate initializers, remove them, and save an
    `optimized_<name>` copy next to the original file."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
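
# Minimal usage sketch (the model path below is illustrative):
#
#   optimized_path = remove_dup_initializers("models/encoder.onnx")
#   # -> writes models/optimized_encoder.onnx and returns that path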
| 13 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation (mirrors diffusers' DanceDiffusionPipeline)."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
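
# Minimal usage sketch (the checkpoint name is illustrative; any checkpoint with a
# compatible 1D UNet and scheduler should work):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios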
| 5 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 146 | 0 |
"""
Slowsort: a deliberately inefficient, in-place sorting algorithm built on the
"multiply and surrender" paradigm (Broder and Stolfi).
"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place.

    >>> seq = [5, 2, 4, 1, 3]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 5]
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
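
# Slowsort is a teaching example only: the recurrence T(n) = 2*T(n/2) + T(n-1) + O(1)
# makes its running time super-polynomial, so never use it for real sorting work.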
if __name__ == "__main__":
from doctest import testmod
testmod()
| 236 |
"""Download an Instagram video or IGTV clip via the downloadgram.net API."""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 236 | 1 |
"""simple docstring"""
def lowercase_ ( _snake_case = 200 ):
SCREAMING_SNAKE_CASE__ : List[str] = [1, 2, 5, 10, 20, 50, 100, 200]
SCREAMING_SNAKE_CASE__ : Any = [0] * (pence + 1)
SCREAMING_SNAKE_CASE__ : Tuple = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_snake_case ,pence + 1 ,1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
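
# Worked example: solution(5) == 4, since 5 pence can be made as 5, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1. Iterating over coins in the outer loop counts each
# combination exactly once, regardless of coin order.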
if __name__ == "__main__":
    assert solution(200) == 73682
| 25 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 25 | 1 |
"""
Project Euler problem 173: using up to one million tiles, how many different
"hollow" square laminae can be formed?
https://projecteuler.net/problem=173
"""
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
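
# Reasoning sketch: a lamina with outer side n and hole side h uses n^2 - h^2 tiles,
# where n and h must share parity and h >= 1; the loop counts the admissible h per n.
# From the problem statement, solution(100) should equal 41.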
if __name__ == "__main__":
print(f'{solution() = }')
| 356 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 46 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase__ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
lowerCAmelCase__ = 10
lowerCAmelCase__ = 256
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__lowercase ) < MIN_NUM_TOKENS:
return None
UpperCamelCase = MinHash(num_perm=__lowercase )
for token in set(__lowercase ):
min_hash.update(token.encode() )
return min_hash
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__lowercase ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and update the duplicate clusters if it is close to an existing key."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
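# Hypothetical usage sketch for DuplicationIndex (keys and file contents below are
# invented; keys follow the (index, repo_name, path) format that
# get_duplicate_clusters expects, and the snippet is long enough to clear MIN_NUM_TOKENS).
def _duplication_index_demo():
    snippet = "def add(first_number, second_number):\n    total_value = first_number + second_number\n    print(total_value)\n    return total_value"
    index = DuplicationIndex(duplication_jaccard_threshold=0.85)
    for key in [(0, "repo_a", "a.py"), (1, "repo_b", "b.py")]:
        min_hash = get_min_hash([t for t in NON_ALPHA.split(snippet) if len(t.strip()) > 0])
        index.add(key, min_hash)
    # both keys hold identical content, so they end up in a single duplicate cluster
    return index.get_duplicate_clusters()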
def _compute_min_hash(element):
    """Helper for multiprocessing: compute the MinHash of one (index, row) pair."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    """Iterate over (key, MinHash) pairs computed in a multiprocessing pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build a DuplicationIndex over the dataset and return its duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a, code_b):
    """Compute the exact Jaccard similarity between the token sets of two code snippets."""
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
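# Worked example (illustrative, not in the original script): NON_ALPHA splits
# "def foo: return 1" into {"def", "foo", "return", "1"} and "def bar: return 1"
# into {"def", "bar", "return", "1"}; the intersection has 3 tokens and the union
# has 5, so the similarity is 3 / 5 = 0.6.
assert jaccard_similarity("def foo: return 1", "def bar: return 1") == 0.6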
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its 'extremes': elements that are not near-duplicates of each other."""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicates from the dataset, keeping one 'extreme' per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
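# Hypothetical end-to-end sketch (the toy dataset below is invented; any
# `datasets.Dataset` with "content", "repo_name" and "path" columns works).
def _deduplicate_demo():
    snippet = "def add(first_number, second_number):\n    total_value = first_number + second_number\n    print(total_value)\n    return total_value"
    toy_dataset = Dataset.from_dict(
        {
            "content": [snippet, snippet, "import os\nprint(os.getcwd())"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_filter, duplicate_clusters = deduplicate_dataset(toy_dataset, jaccard_threshold=0.85)
    return ds_filter, duplicate_clusters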
| 153 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    """simple docstring"""
    a : int = 0
    b : bool = False
    c : float = 3.0


class KwargsHandlerTester( unittest.TestCase ):
    """simple docstring"""
    def test_kwargs_handler( self ):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"a": 2, "b": True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
    def test_grad_scaler_kwargs( self ):
        '''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs( self ):
        '''simple docstring'''
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
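# Minimal re-implementation sketch (an assumption about the behavior under test,
# not accelerate's actual code): `to_kwargs` is expected to return only the fields
# whose values differ from the dataclass defaults, which is exactly what the first
# test above checks.
from dataclasses import fields


@dataclass
class _ToKwargsSketch:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # compare each field against a default-constructed instance and keep the diffs
        defaults = _ToKwargsSketch()
        return {f.name: getattr(self, f.name) for f in fields(self) if getattr(self, f.name) != getattr(defaults, f.name)}


assert _ToKwargsSketch(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}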
| 79 | 0 |
"""simple docstring"""
import math
def proth( number : int ) -> int:
    """Return the number-th Proth number (3, 5, 9, 13, 17, 25, 33, ...)."""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )

    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value : int = 0
try:
            value = proth(number)
except ValueError:
print(F"""ValueError: there is no {number}th Proth number""")
continue
print(F"""The {number}th Proth number: {value}""")
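    # Sanity sketch (not in the original file): Proth numbers have the form
    # k * 2^n + 1 with odd k < 2^n, so the sequence starts 3, 5, 9, 13, 17, 25, 33, ...
    assert [proth(n) for n in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]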
| 361 |
def longest_distance( graph ):
    """Print the number of vertices on the longest path in the DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )

    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x )

    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
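# Worked example (not in the original): a second, tiny DAG to sanity-check the
# function; the chain 0 -> 1 -> 2 visits three vertices, so this prints 3.
longest_distance({0: [1], 1: [2], 2: []})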
| 204 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Any = logging.get_logger(__name__)
def get_config( model_name ):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )

    return config
def rename_key( name ):
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
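# Illustrative spot-checks (not in the original script) of the mapping performed by
# rename_key for two typical timm BiT keys:
assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("head.fc.weight") == "classifier.1.weight"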
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )

    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )

    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub" )
        model.push_to_hub(f"ybelkada/{model_name}" )
        processor.push_to_hub(f"ybelkada/{model_name}" )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCAmelCase : int = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 13 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )

                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
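# Illustrative sketch (not part of the original tests): the first helper can be used
# to trace a learning-rate schedule. With 2 warmup steps and 10 training steps, a
# linear schedule climbs to the base LR and then decays linearly, matching the
# expected values used in the scheduler tests below.
def _trace_linear_schedule():
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    return unwrap_schedule(scheduler, num_steps=10)  # ~[0.0, 5.0, 10.0, 8.75, ..., 1.25]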
@require_torch
class OptimizationTest( unittest.TestCase ):
    """simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    """simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )

            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=F"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """Pickleable wrapper around a schedule lambda."""
    def __init__( self , fn ):
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 13 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["""bert-base-uncased""", """bert-base-cased"""]
TINY_MODEL_CHECKPOINT = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave( tf.keras.Model ):
        """simple docstring"""
        def __init__( self , tokenizer )->None:
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )

        def call( self , inputs )->tf.Tensor:
            '''simple docstring'''
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self )->None:
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=False
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self )->None:
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='''tf''' , padding='''longest''' )
                tf_outputs = tf_tokenizer(test_inputs )

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
    def test_different_pairing_styles( self )->None:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
    def test_graph_mode( self )->None:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self )->None:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / '''saved.model'''
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
| 65 |
def heaps( arr ):
    """Return all permutations of `arr` using the iterative version of Heap's algorithm."""
    if len(arr ) <= 1:
        return [tuple(arr )]

    res = []

    def generate( n , arr ):
        c = [0] * n
        res.append(tuple(arr ) )

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0] , arr[i] = arr[i] , arr[0]
                else:
                    arr[c[i]] , arr[i] = arr[i] , arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
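    # Worked example (not in the original): Heap's algorithm yields all n! orderings,
    # so three elements produce six distinct permutations.
    assert len(heaps([1, 2, 3])) == 6
    assert set(heaps([1, 2, 3])) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}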
| 65 | 1 |
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(F"Split name should match '{_split_re}' but got '{split}'.")
    return F"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return F"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
| 236 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
lowercase :int = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
lowercase :Tuple = 1024
lowercase :Tuple = 4096
lowercase :Union[str, Any] = 24
lowercase :Tuple = 16
lowercase :Optional[Any] = [5, 11, 17, 23]
lowercase :Optional[Any] = [256, 512, 1024, 1024]
lowercase :Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowercase :Any = 768
lowercase :Optional[int] = [1, 1, 1, 0.5]
lowercase :int = [256, 512, 768, 768]
lowercase :str = 150
lowercase :Tuple = 16
lowercase :List[str] = (1, 384, 384)
lowercase :Any = False
lowercase :Optional[int] = "project"
if "ade" in checkpoint_url:
lowercase :Dict = True
lowercase :str = 768
lowercase :Tuple = [1, 1, 1, 0.5]
lowercase :Dict = 150
lowercase :Any = 16
lowercase :List[Any] = "huggingface/label-files"
lowercase :Union[str, Any] = "ade20k-id2label.json"
lowercase :List[str] = json.load(open(cached_download(hf_hub_url(lowerCamelCase, lowerCamelCase, repo_type="dataset" ) ), "r" ) )
lowercase :Optional[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowercase :Union[str, Any] = idalabel
lowercase :Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase :List[str] = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase :Any = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
lowercase :int = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
lowercase :Any = name.replace("patch_embed", "" )
if "pos_embed" in name:
lowercase :Optional[Any] = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
lowercase :List[str] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase :str = name.replace("proj", "projection" )
if "blocks" in name:
lowercase :List[Any] = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
lowercase :str = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
lowercase :Any = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name and "backbone" not in name:
lowercase :Any = name.replace("norm1", "layernorm_before" )
if "norm2" in name and "backbone" not in name:
lowercase :Any = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
lowercase :Optional[Any] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
lowercase :Union[str, Any] = name.replace("scratch", "neck" )
if "layer1_rn" in name:
lowercase :str = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
lowercase :Dict = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
lowercase :Tuple = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
lowercase :Optional[int] = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
lowercase :Any = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase :Any = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
lowercase :str = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
lowercase :Optional[Any] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
lowercase :int = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
lowercase :Optional[int] = name.replace("conv1", "convolution1" )
if "conv2" in name:
lowercase :List[str] = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase :Tuple = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase :Any = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase :Optional[Any] = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase :str = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase :str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase :Any = name.replace("pretrained", "dpt" )
if "bn" in name:
lowercase :Optional[int] = name.replace("bn", "batch_norm" )
if "head" in name:
lowercase :Union[str, Any] = name.replace("head", "head.head" )
if "encoder.norm" in name:
lowercase :Optional[Any] = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
lowercase :str = name.replace("auxlayer", "auxiliary_head.head" )
if "backbone" in name:
lowercase :List[str] = name.replace("backbone", "backbone.bit.encoder" )
if ".." in name:
lowercase :Optional[int] = name.replace("..", "." )
if "stem.conv" in name:
lowercase :Union[str, Any] = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
lowercase :List[str] = name.replace("blocks", "layers" )
if "convolution" in name and "backbone" in name:
lowercase :int = name.replace("convolution", "conv" )
if "layer" in name and "backbone" in name:
lowercase :List[str] = name.replace("layer", "layers" )
if "backbone.bit.encoder.bit" in name:
lowercase :Dict = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
if "embedder.conv" in name:
lowercase :Dict = name.replace("embedder.conv", "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase :str = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
return name
def read_in_q_k_v( state_dict, config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
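# Numeric sketch (illustrative, not in the original script): with hidden_size = 4, a
# fused qkv weight of shape (12, 4) is split row-wise into three (4, 4) blocks —
# query rows [:4], key rows [4:8] and value rows [-4:] — exactly the slicing above.
def _qkv_split_demo():
    hidden_size = 4
    in_proj_weight = torch.arange(12 * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query.shape, key.shape, value.shape  # all torch.Size([4, 4])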
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config )

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt" )

    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255 ).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'camembert'

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 358 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )

        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
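    # Hypothetical usage sketch (the checkpoint name is a real hub id, but treat the
    # example as an illustration rather than part of this module): the language codes
    # above are appended as suffix special tokens around the encoded text.
    # tokenizer = MBartTokenizerFast.from_pretrained(
    #     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    # )
    # batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")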
| 11 | 0 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence ) -> None:
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree( sequence , current_subsequence , index ) -> None:
    """Print every subsequence by exploring a binary include/exclude state-space tree."""
    if index == len(sequence ):
        print(current_subsequence )
        return

    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
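    # Worked example (not in the original): for a two-element list the recursion
    # first excludes, then includes each element, printing [], ['B'], ['A'], ['A', 'B'].
    generate_all_subsequences(["A", "B"])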
| 50 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig( PretrainedConfig ):
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )

    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 46 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32 | 1 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[torch.Generator] = None , num_inference_steps : int = 50 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ):
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image ), "This is a local test"
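        # Hypothetical usage sketch (the model id and local path are assumptions):
        # diffusers can load this module as a "custom pipeline", and the extra
        # "This is a local test" marker in the return values lets a test verify that
        # the local file, rather than a hub pipeline, was loaded.
        # pipe = DiffusionPipeline.from_pretrained(
        #     "google/ddpm-cifar10-32", custom_pipeline="./custom_pipeline"
        # )
        # images, marker = pipe(batch_size=1, num_inference_steps=2)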
| 216 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 204 | 0 |
import os
from math import log10


def solution( data_file : str = "base_exp.txt" ) -> int:
    """Return the 1-based line number of the base/exponent pair with the greatest value."""
    largest : float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(',' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
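# Why the comparison above works (worked equation, not in the original file):
# comparing a**x values directly would require astronomically large integers, but
# log10(a**x) = x * log10(a), and log10 is monotonically increasing, so ranking
# pairs by x * log10(a) ranks the powers themselves.
# Example: 2**10 = 1024 vs 3**7 = 2187; 10 * log10(2) ≈ 3.01 < 7 * log10(3) ≈ 3.34.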
if __name__ == "__main__":
    print(solution())
| 307 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    model_type = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
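
    # Note: the product of `conv_stride` is the feature encoder's total
    # downsampling factor, exposed below as `inputs_to_logits_ratio`:
    # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per output frame
    # (20 ms per frame for 16 kHz audio, the rate these models consume).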
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 307 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
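
# Note on the renaming scheme above: LDM-style keys such as "encoder.down.0.block.*"
# and "mid.attn_1.*" are mapped onto diffusers' AutoencoderKL layout
# ("encoder.down_blocks.0.resnets.*", "mid_block.attentions.0.*"), and
# `conv_attn_to_linear` squeezes the 1x1-conv attention weights into the linear
# projections that diffusers expects.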
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only SD v1 VAEs are handled here; fetch the reference LDM config.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
UpperCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
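# Example invocation (sketch; the script filename is illustrative):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers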
| 65 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
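# Quick usage sketch:
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()  # -2
#   m.inverse()      # note: scalar __mul__ truncates entries with int(), so the
#                    # result is only exact when the true inverse is integral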
| 65 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
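# `evaluate` (transformers.tools.python_interpreter) is a restricted interpreter:
# it walks the AST of a code string, resolves names only from the supplied tool
# mapping and the mutable `state` dict, and returns the value of the last
# evaluated statement. Calling anything not passed in as a tool (such as
# `add_two` above) is refused, as the stdout-capture test below demonstrates.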
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 353 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
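
    # Worked example: with chunk_length_s=1.0 and overlap=0.01 at 24 kHz,
    # chunk_length = 24000 samples and chunk_stride = max(1, int(0.99 * 24000)) = 23760,
    # so consecutive chunks share 240 samples (10 ms) of audio.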
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 8 | 0 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            # second env key reconstructed; assumed to be the strategy-name variant
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
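
# The unit tests above drive FullyShardedDataParallelPlugin purely through
# environment variables (FSDP_SHARDING_STRATEGY, FSDP_AUTO_WRAP_POLICY, ...),
# which is how `accelerate launch` hands CLI flags to worker processes. The
# integration tests below exercise the same options end to end by spawning
# real multi-GPU subprocesses.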
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 31 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
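
# Quick usage sketch of the API exercised above (values illustrative):
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   unused = config.update(max_new_tokens=256, foo="bar")  # returns {"foo": "bar"}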
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 11 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
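
# Example invocation (sketch; file names are illustrative):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin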
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : int =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 367 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
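
# model_infer stages host->device copies, enqueues the TensorRT execution
# context asynchronously on a CUDA stream, copies the start/end logit buffers
# back, and times only the enqueue-to-synchronize window, so the reported
# latency excludes tokenization and post-processing.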
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
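
# Windowing example: with max_seq_length=384 and doc_stride=128, a long context is
# split into overlapping features; consecutive windows share 128 context tokens,
# so an answer that would straddle a window boundary still lies fully inside
# at least one window.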
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
A__ = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A__ = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
A__ = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
A__ = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
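# Format reminder: the squad/squad_v2 metrics expect predictions shaped like
# {"id": ..., "prediction_text": ...} (plus "no_answer_probability" for v2) and
# references shaped like {"id": ..., "answers": {"text": [...], "answer_start": [...]}},
# which is exactly what the two lists built above provide.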
__lowerCAmelCase : Tuple =load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
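    # Quick arithmetic check (illustrative): a binding of shape (8, 384) with a
    # 4-byte dtype such as int32 gives 8 * 384 * 4 = 12288 bytes, which is the
    # size handed to cuda.mem_alloc below.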
# Allocate device memory for inputs and outputs.
__lowerCAmelCase : Any =[cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate page-locked host buffers and device memory for the two outputs (start/end logits)
    __lowerCAmelCase : List[Any] =cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    __lowerCAmelCase : List[str] =cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    __lowerCAmelCase : List[str] =cuda.mem_alloc(h_output0.nbytes)
    __lowerCAmelCase : int =cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowerCAmelCase : Optional[Any] =cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__lowerCAmelCase : str =0.0
__lowerCAmelCase : Tuple =0
__lowerCAmelCase : List[str] =timeit.default_timer()
__lowerCAmelCase : Union[str, Any] =None
for step, batch in enumerate(eval_dataloader):
        __lowerCAmelCase , __lowerCAmelCase : Dict =model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
__lowerCAmelCase , __lowerCAmelCase : List[Any] =outputs
__lowerCAmelCase : Tuple =torch.tensor(start_logits)
__lowerCAmelCase : Tuple =torch.tensor(end_logits)
        # predictions and labels must be padded before they can be gathered across processes
__lowerCAmelCase : Tuple =accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__lowerCAmelCase : int =(accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowerCAmelCase : List[Any] =logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__lowerCAmelCase : Dict =nested_truncate(all_preds, len(eval_dataset))
__lowerCAmelCase : Optional[int] =timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
__lowerCAmelCase : Optional[Any] =post_processing_function(eval_examples, eval_dataset, all_preds)
__lowerCAmelCase : Optional[Any] =metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 123 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str:
a_ : Optional[Any] = parent
a_ : List[str] = batch_size
a_ : List[str] = seq_length
a_ : str = is_training
a_ : str = use_input_mask
a_ : int = use_token_type_ids
a_ : List[str] = use_labels
a_ : Optional[int] = vocab_size
a_ : Any = hidden_size
a_ : int = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : str = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : int = max_position_embeddings
a_ : Tuple = type_vocab_size
a_ : Optional[Any] = type_sequence_label_size
a_ : Tuple = initializer_range
a_ : Dict = num_labels
a_ : str = scope
a_ : Optional[int] = range_bbox
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ : int = bbox[i, j, 3]
a_ : str = bbox[i, j, 1]
a_ : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ : Tuple = bbox[i, j, 2]
a_ : List[str] = bbox[i, j, 0]
a_ : Union[str, Any] = t
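        # The two swaps above enforce the usual [x0, y0, x1, y1] box convention
        # (x0 <= x1 and y0 <= y1), so every randomly drawn bbox is a valid,
        # possibly degenerate, rectangle.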
a_ : List[Any] = None
if self.use_input_mask:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : int = None
a_ : Tuple = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Optional[int] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str:
a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int:
a_ : Any = self.num_labels
a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str:
a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : List[str] = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
a_ : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
a_ : Optional[int] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : str = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int:
return True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ : str = LiltModelTester(self )
a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ : List[str] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ )
a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = torch.Size([1, 2, 7_6_8] )
a_ : int = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , )
        self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
| 32 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[int] = '''convbert'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any:
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
a_ : Tuple = vocab_size
a_ : List[str] = hidden_size
a_ : List[str] = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Optional[int] = intermediate_size
a_ : int = hidden_act
a_ : Dict = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : str = max_position_embeddings
a_ : List[str] = type_vocab_size
a_ : List[str] = initializer_range
a_ : Tuple = layer_norm_eps
a_ : Optional[int] = embedding_size
a_ : List[Any] = head_ratio
a_ : List[Any] = conv_kernel_size
a_ : Tuple = num_groups
a_ : Tuple = classifier_dropout
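        # Note: head_ratio, conv_kernel_size and num_groups are the ConvBERT-specific
        # settings: head_ratio trades a fraction of the attention heads for the
        # span-based dynamic convolution branch, whose kernel width is conv_kernel_size.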
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a_ : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 32 | 1 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class lowerCamelCase ( __lowerCAmelCase ):
def _lowerCamelCase ( self, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_ ) -> List[str]:
if tokenize_kwargs is None:
snake_case = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
snake_case = truncation
snake_case = tokenize_kwargs
snake_case = {}
if return_tensors is not None:
snake_case = return_tensors
return preprocess_params, {}, postprocess_params
def _lowerCamelCase ( self, lowercase_, **lowercase_ ) -> Dict[str, GenericTensor]:
snake_case = self.framework
snake_case = self.tokenizer(lowercase_, return_tensors=lowercase_, **lowercase_ )
return model_inputs
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = self.model(**lowercase_ )
return model_outputs
def _lowerCamelCase ( self, lowercase_, lowercase_=False ) -> Tuple:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *lowercase_, **lowercase_ ) -> List[Any]:
return super().__call__(*lowercase_, **lowercase_ )
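# A minimal usage sketch (kept as comments; assumes the public
# "distilbert-base-uncased" checkpoint, but any feature-extraction-capable model
# works the same way):
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     vectors = extractor("Hello world")  # nested lists: [batch, tokens, hidden]
#     tensors = extractor("Hello world", return_tensors=True)  # framework tensor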
| 332 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : Optional[Any] = """▁"""
__UpperCamelCase : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ):
_UpperCAmelCase = BigBirdTokenizer
_UpperCAmelCase = BigBirdTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
super().setUp()
snake_case__ = self.tokenizer_class(UpperCamelCase , keep_accents=UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self: int ) -> Any:
snake_case__ = '<s>'
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> Tuple:
snake_case__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(UpperCamelCase ) , 10_04 )
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_rust_tokenizer()
snake_case__ = 'I was born in 92000, and this is falsé.'
snake_case__ = tokenizer.tokenize(UpperCamelCase )
snake_case__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
snake_case__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
snake_case__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
snake_case__ = self.get_rust_tokenizer()
snake_case__ = tokenizer.encode(UpperCamelCase )
snake_case__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] ) -> Any:
snake_case__ = BigBirdTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
snake_case__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [2_85, 46, 10, 1_70, 3_82] , )
snake_case__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
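        # Reminder: "▁" (SPIECE_UNDERLINE) is SentencePiece's word-boundary marker,
        # so "▁I", "▁was", ... start new words while "or", "n", "al" continue the
        # previous one; pieces missing from the 1000-piece test model ("9", "é")
        # fall back to <unk>, as the id round-trip below shows.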
snake_case__ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowerCAmelCase_ ( self: List[str] ) -> Any:
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case__ = 'Hello World!'
snake_case__ = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
snake_case__ = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@require_torch
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
snake_case__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ = ' '.join(UpperCamelCase )
snake_case__ = self.big_tokenizer.encode_plus(UpperCamelCase , return_tensors='pt' , return_token_type_ids=UpperCamelCase )
snake_case__ = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCamelCase )
snake_case__ = BigBirdConfig(attention_type='original_full' )
snake_case__ = BigBirdModel(UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase )
model(**UpperCamelCase )
@slow
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case__ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
snake_case__ = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Union[str, Any]:
# fmt: off
snake_case__ = {'input_ids': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 307 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __SCREAMING_SNAKE_CASE:
def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict:
snake_case__ = LlamaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str:
snake_case__ = True
snake_case__ = LlamaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any:
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]:
snake_case__ = True
snake_case__ = True
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# first forward pass
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , )
snake_case__ = outputs.past_key_values
        # create several hypothetical next tokens and extend them onto next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
# select random slice
snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
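        # Sanity check implemented above: the pass that reuses past_key_values only
        # sees the 3 new tokens, so its hidden states must match the last 3 positions
        # of the full forward pass on a randomly chosen hidden-dim slice.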
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ):
_UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = LlamaModelTester(self )
snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
snake_case__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'single_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: Dict ) -> int:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'multi_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self: Dict ) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ids_tensor([1, 10] , config.vocab_size )
snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = LlamaModel(UpperCamelCase )
original_model.to(UpperCamelCase )
original_model.eval()
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = {'type': scaling_type, 'factor': 10.0}
snake_case__ = LlamaModel(UpperCamelCase )
scaled_model.to(UpperCamelCase )
scaled_model.eval()
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
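    # Background (sketch-level): "linear" scaling divides position indices by the
    # factor before the rotary embedding, so even short inputs change, while
    # "dynamic" NTK scaling only rescales the rotary base once the input grows past
    # the original max_position_embeddings, which is why the short input matches.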
@require_torch
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: int ) -> List[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
snake_case__ = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
snake_case__ = 'Simply put, the theory of relativity states that '
snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' )
snake_case__ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase )
# greedy generation outputs
snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase )
snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 307 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__UpperCamelCase = True
except (ImportError, AttributeError):
__UpperCamelCase = object
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> Any:
"""simple docstring"""
pass
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger("transformers-cli/serving")
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(_lowerCamelCase , args.host , args.port , args.workers )
class _A ( __lowercase ):
lowercase__: dict
class _A ( __lowercase ):
lowercase__: List[str]
lowercase__: Optional[List[int]]
class _A ( __lowercase ):
lowercase__: str
class _A ( __lowercase ):
lowercase__: Any
class _A ( __lowercase ):
@staticmethod
def lowercase__ ( __magic_name__ : ArgumentParser ) -> Dict:
"""simple docstring"""
__snake_case : int = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=__magic_name__ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=__magic_name__ , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=__magic_name__ , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=__magic_name__ , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=__magic_name__ , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=__magic_name__ , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=__magic_name__ , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=__magic_name__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=__magic_name__ )
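    # Example invocation (sketch; flags as registered above, model name purely
    # illustrative):
    #   transformers-cli serve --task text-classification \
    #       --model distilbert-base-uncased-finetuned-sst-2-english --port 8888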
def __init__( self : Any , __magic_name__ : Pipeline , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = pipeline
__snake_case : str = host
__snake_case : List[Any] = port
__snake_case : Any = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f'''Serving model over {host}:{port}''' )
__snake_case : int = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ),
] , timeout=6_00 , )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase__ ( self : Any , __magic_name__ : str = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) ) -> Any:
"""simple docstring"""
try:
__snake_case : Union[str, Any] = self._pipeline.tokenizer.tokenize(__magic_name__ )
if return_ids:
__snake_case : Tuple = self._pipeline.tokenizer.convert_tokens_to_ids(__magic_name__ )
return ServeTokenizeResult(tokens=__magic_name__ , tokens_ids=__magic_name__ )
else:
return ServeTokenizeResult(tokens=__magic_name__ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__magic_name__ )} )
def lowercase__ ( self : Any , __magic_name__ : List[int] = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) , ) -> List[Any]:
"""simple docstring"""
try:
__snake_case : Dict = self._pipeline.tokenizer.decode(__magic_name__ , __magic_name__ , __magic_name__ )
return ServeDeTokenizeResult(model="""""" , text=__magic_name__ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__magic_name__ )} )
async def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any]=Body(__magic_name__ , embed=__magic_name__ ) ) -> Optional[int]:
"""simple docstring"""
if len(__magic_name__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__snake_case : Union[str, Any] = self._pipeline(__magic_name__ )
return ServeForwardResult(output=__magic_name__ )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(__magic_name__ )} )
| 13 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = KandinskyImgaImgPipeline
lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowercase__: int = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowercase__: List[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase__: Any = False
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Tuple = MultilingualCLIP(__magic_name__ )
__snake_case : Optional[Any] = text_encoder.eval()
return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
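
# How to run these tests (a sketch; the file path is an assumption about where this
# module lives in the diffusers test tree):
#     pytest tests/pipelines/kandinsky/test_kandinsky_img2img.py -k test_kandinsky_img2img
# The @slow/@require_torch_gpu class only runs when RUN_SLOW=1 and a CUDA device is present.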
'''simple docstring'''
def binary_count_setbits(number: int) -> int:
    """Return the number of set bits (1s) in the binary representation of `number`.

    >>> binary_count_setbits(25)
    3
    """
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
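
    # Extra sanity checks beyond the doctest run (an illustrative sketch):
    assert binary_count_setbits(25) == 3  # bin(25) == '0b11001'
    assert binary_count_setbits(37) == 3  # bin(37) == '0b100101'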
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" LXMERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
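
# Usage sketch (an illustration; downloading the checkpoint named in the map above
# requires network access):
#     tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     tokenizer("a picture of a cat")["input_ids"]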
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position`,
    computed with the dual numbers defined above."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
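    # Worked check (a sketch): f(y) = y**6, so f''(y) = 30 * y**4 and the call
    # above should print 30 * 9**4 == 196830.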
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting of the library's root logger handlers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Emit a warning unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as `logger.warning`, but only emitted once per unique call."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
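
# Usage sketch for the helpers above (illustrative only):
#     logger = get_logger(__name__)
#     set_verbosity_info()
#     logger.info("now visible, since the root level is INFO")
#     disable_progress_bars()  # silences tqdm wrappers that consult _tqdm_active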
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A ="src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A =direct_transformers_import(PATH_TO_TRANSFORMERS)
__A =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A =re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__A ={
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
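
# Quick demo of the checkpoint regex above (a sketch; the docstring line is invented):
#     _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# returns [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")].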
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image pipeline that keeps images generated with the same seed
    visually similar across different output sizes."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
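
# Usage sketch (hedged): load this file as a community pipeline and reuse one seed;
# the checkpoint name below is only an example, not a requirement of the code.
#     pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     generator = torch.Generator("cuda").manual_seed(0)
#     image = pipe("a photo of a cat", height=512, width=512, generator=generator).images[0]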
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for the bert-for-seq-generation checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of subword pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
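
# Usage sketch (hedged; the checkpoint name comes from the vocab map above and
# requires network access to download):
#     tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#     tok.tokenize("hello world")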
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
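
# Example invocation (a sketch; the paths are placeholders and the script filename is
# an assumption about how this file is saved):
#     python convert_gptsan_tf_checkpoint_to_pytorch.py \
#         --tf_model_dir /path/to/tf_checkpoint --output gptsan_pytorch.pt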
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 332 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
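

# Hedged illustration (standalone; the attribute names and submodule mirror
# this file's own `_import_structure`): the lazy-module idea boils down to a
# module-level __getattr__ that imports a submodule only on first attribute
# access, so importing the package itself stays cheap.
def _lazy_getattr_sketch(name):
    import importlib

    if name in {"ResNetModel", "ResNetForImageClassification"}:
        module = importlib.import_module(".modeling_resnet", __package__)
        return getattr(module, name)
    raise AttributeError(name)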
| 332 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    '''simple docstring'''

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
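

# Hedged usage sketch (not part of the original module; the doubling function
# and the tiny list are invented for illustration): `PipelineDataset` applies
# `process` lazily per item, so preprocessing composes with DataLoader batching.
def _example_pipeline_dataset():
    def double(x, factor=2):
        return x * factor

    ds = PipelineDataset([1, 2, 3], double, {"factor": 2})
    return [ds[i] for i in range(len(ds))]  # -> [2, 4, 6]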
class PipelineIterator(IterableDataset):
    '''simple docstring'''

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
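

# Standalone sanity check (tensors invented for illustration) of the unrolling
# idea above: slicing a loader batch of 4 and restoring the batch dimension via
# `unsqueeze(0)` yields four batch_size=1 tensors.
def _example_unroll_batch():
    batch = torch.arange(8).reshape(4, 2)  # one loader batch of 4 items
    items = [batch[i].unsqueeze(0) for i in range(batch.shape[0])]
    assert all(item.shape == (1, 2) for item in items)
    return items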
class PipelineChunkIterator(PipelineIterator):
    '''simple docstring'''

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    '''simple docstring'''

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    '''simple docstring'''

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    '''simple docstring'''

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 115 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """simple docstring"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """simple docstring"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """simple docstring"""

    text: str


class ServeForwardResult(BaseModel):
    """simple docstring"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
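

# Hedged client-side sketch (added for illustration; the default host/port
# mirror the argparse defaults above, and the JSON key follows the
# `Body(..., embed=True)` parameter name of `forward`): once
# `transformers-cli serve --task ...` is running, the route can be queried
# over plain HTTP.
def _example_query_forward(host="localhost", port=8888):
    import requests

    response = requests.post(f"http://{host}:{port}/forward", json={"inputs": "Hello world"})
    return response.json()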
| 13 |
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
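

# Shape sanity check (kernel values invented) for the depthwise transpose in
# `load_tf_weights_in_mobilenet_va` above: TF stores depthwise kernels as
# (H, W, in_channels, depth_multiplier); permuting with (2, 3, 0, 1) gives
# PyTorch's (in_channels, depth_multiplier, H, W).
def _example_depthwise_transpose():
    import numpy as np

    tf_kernel = np.zeros((3, 3, 32, 1))
    pt_kernel = np.transpose(tf_kernel, (2, 3, 0, 1))
    assert pt_kernel.shape == (32, 1, 3, 3)
    return pt_kernel.shape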
def apply_tf_padding(features, conv_layer):
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
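

# Quick numeric check (values invented) of the TF "SAME" padding rule above:
# a 7-pixel dimension with stride 2 and kernel 3 needs 2 pixels of padding in
# total, split evenly as (1, 1).
def _example_same_padding(in_size=7, stride=2, kernel=3):
    if in_size % stride == 0:
        total = max(kernel - stride, 0)
    else:
        total = max(kernel - (in_size % stride), 0)
    return total // 2, total - total // 2  # -> (1, 1)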
class MobileNetVaConvLayer(nn.Module):
    """simple docstring"""

    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """simple docstring"""

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """simple docstring"""

    def __init__(self, config: MobileNetVaConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
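

# Hedged mini-demo (tensor values invented) of the `problem_type` dispatch in
# the classification head above: integer labels with num_labels > 1 select
# single-label classification.
def _example_problem_type(num_labels=3):
    labels = torch.tensor([1, 0, 2])
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"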
| 13 | 1 |
def method_a(boundary: list, steps: float) -> float:
    '''simple docstring'''
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    '''simple docstring'''
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    '''simple docstring'''
    y = (x - 0) * (x - 0)
    return y


def main():
    '''simple docstring'''
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
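

# Hedged convergence sketch (added for illustration): for f(x) = x**2 on
# [0, 1] the exact integral is 1/3, and doubling `steps` should roughly
# quarter the trapezoidal rule's error.
def _example_convergence():
    exact = 1.0 / 3.0
    for steps in (10.0, 20.0, 40.0):
        approx = method_a([0.0, 1.0], steps)
        print(steps, abs(approx - exact))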
| 367 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 169 | 0 |
"""simple docstring"""
import string

def atbash_slow(sequence: str) -> str:
    output = ''
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark():
    from timeit import timeit

    print('Running performance benchmarks...')
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
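

# Quick self-check (added for illustration): Atbash is an involution, so
# applying it twice returns the original string.
def _example_atbash_roundtrip(text="Hello, World!"):
    assert atbash(atbash(text)) == text
    return atbash(text)  # -> "Svool, Dliow!"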
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
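

# Hedged sketch (all values invented) of the rank arithmetic used in
# `init_gpu_params` above: with 2 nodes of 4 GPUs each, global rank 5 lives
# on node 1 as local rank 1.
def _example_rank_layout(world_size=8, n_gpu_per_node=4, global_rank=5):
    n_nodes = world_size // n_gpu_per_node
    node_id = global_rank // n_gpu_per_node
    local_rank = global_rank % n_gpu_per_node
    return n_nodes, node_id, local_rank  # -> (2, 1, 1)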
| 78 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """simple docstring"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
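

# Standalone sketch (timestamps invented) of the duration computation in
# `extract_time_from_single_job`: ISO-8601 strings are parsed and the
# difference is rounded to whole minutes.
def _example_duration():
    start = date_parser.parse("2023-01-01T10:00:00Z")
    end = date_parser.parse("2023-01-01T10:31:00Z")
    return round((end - start).total_seconds() / 60.0)  # -> 31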
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 132 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """simple docstring"""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
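

# Hedged round-trip check (file path and contents invented): only the dotted
# relative-import forms are collected by `get_relative_imports`, de-duplicated.
def _example_relative_imports(tmp_path="/tmp/_example_module.py"):
    with open(tmp_path, "w", encoding="utf-8") as f:
        f.write("import os\nimport .helpers\nfrom .helpers import util\n")
    return get_relative_imports(tmp_path)  # -> ['helpers']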
def check_imports(filename):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
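

# Hedged usage sketch (the repo id and file name below are placeholders, not
# taken from the original module): resolving and importing a community
# pipeline class through the dynamic-module machinery above.
def _example_load_dynamic_class(repo_id="some-user/some-community-pipeline"):
    pipeline_class = get_class_from_dynamic_module(repo_id, "pipeline.py", class_name=None)
    return pipeline_class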
| 132 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """simple docstring"""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict( key , value , full_name , weight_type , hf_dict ) -> Optional[Any]:
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        full_key = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '''.'''.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ) -> Optional[int]:
    """simple docstring"""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('''.''' )[-2]
                mapped_key = mapped_key.replace('''*''' , layer_index )
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ) -> str:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Tuple:
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ) -> Any:
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
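# Example invocation (illustrative sketch only; the script and file names are
# hypothetical placeholders, not shipped with this module):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h-converted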
| 142 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> Dict:
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys( s_dict ) -> Any:
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('''transformer_layers''' , '''layers''' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('''subsample''' , '''conv''' )] = s_dict.pop(key )
def make_linear_from_emb( emb ) -> Union[str, Any]:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms( checkpoint_path , pytorch_dump_folder_path ) -> Optional[int]:
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args''']
    state_dict = mam_aaa['''model''']
    lm_head_weights = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(''',''' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
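# Example invocation (illustrative sketch only; the script and checkpoint names are
# hypothetical placeholders):
#   python convert_s2t_checkpoint.py \
#       --fairseq_path ./s2t_transformer_s.pt \
#       --pytorch_dump_folder_path ./s2t-converted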
| 142 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Tuple =logging.get_logger(__name__)
lowerCamelCase : List[Any] ={
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __a ( PretrainedConfig ):
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
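# Hedged usage sketch (note: both classes in this file carry the mangled name `__a`;
# this refers to the vision configuration defined above, whose defaults mirror a
# ViT-base style encoder at 224px with 16px patches):
#   config = __a(image_size=2_24 , patch_size=16 )
#   assert config.num_hidden_layers == 12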
class __a ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
        return 1e-4
| 355 |
from ..utils import DummyObject, requires_backends
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> int:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> Optional[Any]:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> Optional[Any]:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> Union[str, Any]:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> Any:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> Dict:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
def SCREAMING_SNAKE_CASE ( *args , **kwargs ) -> List[Any]:
    requires_backends(SCREAMING_SNAKE_CASE , ["torch"] )
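# The functions above and the classes in this file follow the standard dummy-object
# pattern: they stand in for the public torch-backed API when `torch` is not
# installed, so any use raises a clear missing-backend error via `requires_backends`
# instead of an opaque ImportError at import time.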
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : int = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
        requires_backends(cls , ["torch"] )
| 196 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
    # trial division: strip each factor i while it divides n, testing up to sqrt(n)
    n = _UpperCamelCase
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
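# Worked example: lowerCamelCase(360) -> [2, 2, 2, 3, 3, 5]. Trial division only needs
# divisors up to sqrt(n); whatever remains above 1 is itself prime.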
if __name__ == "__main__":
import doctest
doctest.testmod()
| 115 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class lowerCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
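    # Resulting layouts (BARThez follows the RoBERTa-style scheme):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>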
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 115 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class _a ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
        batch , height , width , channels = UpperCamelCase_.shape
        hidden_states = jax.image.resize(
            UpperCamelCase_ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
        hidden_states = self.conv(hidden_states )
return hidden_states
class _a ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , UpperCamelCase_: Any ) -> Any:
"""simple docstring"""
        hidden_states = self.conv(UpperCamelCase_ )
return hidden_states
class _a ( nn.Module ):
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norma = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conva = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.normb = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.convb = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        """simple docstring"""
        residual = hidden_states
        hidden_states = self.norma(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conva(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        # broadcast the time embedding over the spatial dims: (B, C) -> (B, 1, 1, C)
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.normb(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.convb(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
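# Shape note (assumption: NHWC activations throughout). The upsampler doubles axes 1
# and 2 via jax.image.resize, and the resnet block broadcasts the time embedding from
# (batch, channels) to (batch, 1, 1, channels) before the final residual addition.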
| 93 |
def power( base , exponent ):
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
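# Worked example: power(2, 10) -> 1024. Negative exponents are handled below in
# __main__ by taking the reciprocal of power(base, abs(exponent)).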
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
lowerCAmelCase = int(input('Enter the base: ').strip())
lowerCAmelCase = int(input('Enter the exponent: ').strip())
lowerCAmelCase = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
lowerCAmelCase = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 93 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase_ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase ( self : Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase ( self : str ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase ( self : Any ):
self._test_save_load_local()
def lowerCamelCase ( self : str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 35 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
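# Usage sketch (an illustrative addition): the config behaves like any other
# PretrainedConfig subclass, so fields can be overridden at construction time.
#
#   config = RealmConfig()  # defaults shown above
#   tiny = RealmConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=128)
#   print(config.num_candidates, tiny.hidden_size)  # 8 128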
| 169 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
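# Usage sketch (an addition; assumes a D4RL-style gym env plus pretrained
# value-function / diffusion UNet / scheduler objects, none of which ship with
# this file):
#
#   env = gym.make("hopper-medium-v2")
#   pipeline = ValueGuidedRLPipeline(value_function=vf, unet=unet, scheduler=scheduler, env=env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)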
| 365 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
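# Note (an addition): OfflineSimulationMode selects how the simulated outage
# behaves -- CONNECTION_FAILS raises immediately, CONNECTION_TIMES_OUT hangs
# until the client-side timeout, and HF_DATASETS_OFFLINE_SET_TO_1 flips the
# library's offline flag. The context manager can wrap arbitrary code, e.g.:
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       ...  # every outbound request in here fails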
| 104 | 0 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3, 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f'{ugly_numbers(200) = }')
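# An alternative sketch (an addition): the same sequence via a min-heap with a
# seen-set for de-duplication -- simpler to reason about, at O(n log n) cost.
import heapq


def ugly_numbers_heap(n: int) -> int:
    heap, seen = [1], {1}
    for _ in range(n - 1):
        smallest = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = smallest * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return heap[0]


# ugly_numbers_heap(10) == ugly_numbers(10) == 12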
| 132 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'''summarization_{dataset}''']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
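# Example invocation (hypothetical checkpoint path):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc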
| 132 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # (batch, heads, seq_length, head_dim)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key, atol=self.tolerance)
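# Background note (an addition): rotary position embeddings rotate each
# (even, odd) feature pair of q and k by a position-dependent angle
#
#   theta_i = pos / 10000^(2i / d)
#   (x_{2i}, x_{2i+1}) -> (x_{2i} cos(theta_i) - x_{2i+1} sin(theta_i),
#                          x_{2i+1} cos(theta_i) + x_{2i} sin(theta_i))
#
# so attention scores depend only on relative positions. The expected tensors
# above are the first 6 positions / 8 features of this transformation (in the
# sin/cos layout RoFormer uses) applied to the synthetic q/k ramps.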
| 355 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
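# Round-trip sketch (an addition; needs a real SentencePiece model, e.g. the
# published pretrained vocab):
#
#   tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tok("Hello world")["input_ids"]   # ends with <sep> <cls>; padding side is left
#   print(tok.convert_ids_to_tokens(ids))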
| 278 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1_103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase__: List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name='google/bigbird-pegasus-large-arxiv',
            revision='ba85d0851d708441f91440d509690f1ab6353415',
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1_000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1],
        )
| 196 | 0 |
"""simple docstring"""
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
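# Note (an addition): list.pop(0) is O(n), so both functions are quadratic in the
# worst case; for larger graphs a collections.deque gives O(1) pops from the left:
#
#   from collections import deque
#   queue = deque([[start]])
#   path = queue.popleft()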
| 350 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '''$''') for x in qs))
    # Windows of len(qs) in ks, checking for matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
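# Example sketch (an addition) of how the matcher and the public entry point are
# used on flattened parameter keys:
#
#   _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  # True
#   partition_specs = set_partitions(params)  # params: nested dict of arrays; one PartitionSpec per leaf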
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append('''Text input''')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png''').resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append('''text''')
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append('''image''')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append('''audio''')
        else:
            raise ValueError(F'''Invalid output: {output}''')

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, '''inputs'''))
        self.assertTrue(hasattr(self.tool, '''outputs'''))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, '''description'''))
        self.assertTrue(hasattr(self.tool, '''default_checkpoint'''))
        self.assertTrue(self.tool.description.startswith('''This is a tool that'''))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__a = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
def __snake_case( ) -> int:
snake_case__ : Dict = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case__ : List[Any] = parser.parse_args()
return args.f
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="eval" ) -> List[str]:
snake_case__ : Dict = os.path.join(_lowerCAmelCase , f"{split}_results.json" )
if os.path.exists(_lowerCAmelCase ):
with open(_lowerCAmelCase , """r""" ) as f:
return json.load(_lowerCAmelCase )
raise ValueError(f"can't find {path}" )
__a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def lowerCamelCase ( self : int ):
snake_case__ : str = self.get_auto_remove_tmp_dir()
snake_case__ : str = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_flax_glue.main()
snake_case__ : Optional[int] = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def lowerCamelCase ( self : str ):
snake_case__ : Any = self.get_auto_remove_tmp_dir()
snake_case__ : Optional[Any] = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_clm_flax.main()
snake_case__ : Optional[Any] = get_results(snake_case_ )
self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case__ : Dict = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_summarization_flax.main()
snake_case__ : Any = get_results(snake_case_ , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case__ : Any = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_mlm_flax.main()
snake_case__ : str = get_results(snake_case_ )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[int] = self.get_auto_remove_tmp_dir()
snake_case__ : str = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_ta_mlm_flax.main()
snake_case__ : int = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def lowerCamelCase ( self : Dict ):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU runs
snake_case__ : Tuple = 7 if get_gpu_count() > 1 else 2
snake_case__ : Dict = self.get_auto_remove_tmp_dir()
snake_case__ : List[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_flax_ner.main()
snake_case__ : Optional[int] = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : List[str] = self.get_auto_remove_tmp_dir()
snake_case__ : str = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_qa.main()
snake_case__ : Optional[Any] = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 43 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__a = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class UpperCAmelCase_ :
"""simple docstring"""
def __call__( self : str , snake_case_ : Optional[Any] , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Union[bool, str] = False , snake_case_ : Union[bool, str] = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[bool] = None , **snake_case_ : Union[str, Any] , ):
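        # Three cases: no titles/texts -> encode the questions alone; exactly one
        # of titles/texts -> encode questions paired with that single field; both
        # given -> build (question + title, text) concatenations manually below.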
if titles is None and texts is None:
return super().__call__(
snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
elif titles is None or texts is None:
snake_case__ : int = titles if texts is None else texts
return super().__call__(
snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
snake_case__ : List[str] = titles if not isinstance(snake_case_ , snake_case_ ) else [titles]
snake_case__ : Union[str, Any] = texts if not isinstance(snake_case_ , snake_case_ ) else [texts]
snake_case__ : Dict = len(snake_case_ )
snake_case__ : Union[str, Any] = questions if not isinstance(snake_case_ , snake_case_ ) else [questions] * n_passages
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f"There should be as many titles than texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts." )
snake_case__ : int = super().__call__(snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Any = super().__call__(snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Dict = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case_ , snake_case_ )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : BatchEncoding , snake_case_ : DPRReaderOutput , snake_case_ : int = 16 , snake_case_ : int = 64 , snake_case_ : int = 4 , ):
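        # Rank passages by their relevance logits, then extract the highest
        # scoring answer spans from each passage's text region (the tokens
        # after the second [SEP]).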
snake_case__ : Optional[int] = reader_input["""input_ids"""]
snake_case__ , snake_case__ , snake_case__ : List[str] = reader_output[:3]
snake_case__ : Union[str, Any] = len(snake_case_ )
snake_case__ : Tuple = sorted(range(snake_case_ ) , reverse=snake_case_ , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : Optional[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : int = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : int = len(snake_case_ )
snake_case__ : Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case_ , top_spans=snake_case_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case_ , start_index=snake_case_ , end_index=snake_case_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int , snake_case_ : int , ):
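        # Score every candidate span up to max_answer_length as
        # start_logit + end_logit, then greedily keep the top non-overlapping spans.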
snake_case__ : List[str] = []
for start_index, start_score in enumerate(snake_case_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ : Any = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ )
snake_case__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
snake_case__ : Union[str, Any] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class UpperCAmelCase_ ( _a , _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = READER_PRETRAINED_INIT_CONFIGURATION
lowercase = ["input_ids", "attention_mask"]
| 43 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
def __init__( self : Dict , lowercase : Optional[Any] , lowercase : Tuple=13 , lowercase : Optional[Any]=7 , lowercase : Any=True , lowercase : Union[str, Any]=True , lowercase : Optional[Any]=True , lowercase : str=True , lowercase : Optional[Any]=99 , lowercase : Dict=32 , lowercase : Tuple=2 , lowercase : Optional[int]=4 , lowercase : Optional[Any]=37 , lowercase : Optional[int]="gelu" , lowercase : Tuple=0.1 , lowercase : Union[str, Any]=0.1 , lowercase : List[Any]=512 , lowercase : Any=16 , lowercase : Optional[Any]=2 , lowercase : List[str]=0.02 , lowercase : Dict=3 , lowercase : List[Any]=4 , lowercase : str=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 37
UpperCAmelCase = '''gelu'''
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 512
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = None
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : List[str] , lowercase : str , lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel(config=lowercase )
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(lowercase )
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : str , lowercase : Any , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : int , lowercase : List[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase = True
UpperCAmelCase = TFRoFormerForCausalLM(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : str , lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , lowercase : Dict , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Any , lowercase : Tuple , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForSequenceClassification(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFRoFormerForMultipleChoice(config=lowercase )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : str , lowercase : List[str] , lowercase : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Any , lowercase : Optional[int] , lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForTokenClassification(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Dict , lowercase : int , lowercase : Union[str, Any] , lowercase : Any , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForQuestionAnswering(config=lowercase )
UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
        (
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
        ) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _a ( __a , __a , unittest.TestCase ):
__a : Any = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__a : Tuple = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__a : List[Any] = False
__a : Dict = False
def A ( self : Union[str, Any] , lowercase : Any , lowercase : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowercase )
@require_tf
class _a ( unittest.TestCase ):
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase = model(lowercase )[0]
# TODO Replace vocab size
UpperCAmelCase = 50_000
UpperCAmelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , lowercase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
@require_tf
class _a ( unittest.TestCase ):
__a : List[Any] = 1e-4
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = tf.constant([[4, 10]] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase = emba(input_ids.shape )
UpperCAmelCase = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowercase , lowercase , atol=self.tolerance )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCAmelCase = emba.weight[:3, :5]
tf.debugging.assert_near(lowercase , lowercase , atol=self.tolerance )
@require_tf
class _a ( unittest.TestCase ):
__a : Dict = 1e-4
def A ( self : Optional[int] ):
'''simple docstring'''
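        # Apply sinusoidal rotary position embeddings to synthetic query/key
        # tensors and compare slices against precomputed reference values.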
UpperCAmelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCAmelCase = embed_positions([2, 16, 768] )[None, None, :, :]
UpperCAmelCase , UpperCAmelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase , lowercase , lowercase )
UpperCAmelCase = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
UpperCAmelCase = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase , atol=self.tolerance )
| 34 |
'''simple docstring'''
import torch
from torch import nn
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1 ,lowercase__ : Optional[Any]=False ):
super().__init__()
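        # Adaptive softmax: the vocabulary is split at `cutoffs` into a frequent
        # shortlist plus tail clusters, each with its own (optionally
        # down-projected) output layer when div_val > 1.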
__lowercase = n_token
__lowercase = d_embed
__lowercase = d_proj
__lowercase = cutoffs + [n_token]
__lowercase = [0] + self.cutoffs
__lowercase = div_val
__lowercase = self.cutoffs[0]
__lowercase = len(self.cutoffs ) - 1
__lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
__lowercase = nn.ModuleList()
__lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
else:
self.out_projs.append(lowercase__ )
self.out_layers.append(nn.Linear(lowercase__ ,lowercase__ ) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
self.out_layers.append(nn.Linear(lowercase__ ,r_idx - l_idx ) )
__lowercase = keep_order
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Any ):
if proj is None:
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__lowercase = nn.functional.linear(lowercase__ ,proj.t().contiguous() )
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Any=None ,lowercase__ : List[str]=False ):
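        # When labels are given, shift hidden states/labels for next-token
        # prediction; log-probabilities come from a single softmax if there are
        # no clusters, otherwise from the two-level head/tail computation below.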
if labels is not None:
# Shift so that tokens < n predict n
__lowercase = hidden[..., :-1, :].contiguous()
__lowercase = labels[..., 1:].contiguous()
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
__lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
__lowercase = labels != -1_0_0
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = (
-nn.functional.log_softmax(lowercase__ ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
if labels is None:
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = 0
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__lowercase = (labels >= l_idx) & (labels < r_idx)
__lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__lowercase = labels.index_select(0 ,lowercase__ ) - l_idx
__lowercase = head_logprob.index_select(0 ,lowercase__ )
__lowercase = hidden.index_select(0 ,lowercase__ )
else:
__lowercase = hidden
if i == 0:
if labels is not None:
__lowercase = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__lowercase = logprob_i
if labels is not None:
if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,lowercase__ ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ):
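        # Full-vocabulary log-probabilities: compute the head softmax first,
        # then offset each tail cluster's log-probs by its cluster log-probability.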
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = head_logprob[:, -i] + tail_logprob_i
__lowercase = logprob_i
return out
| 104 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , __A : int , __A : str=7 , __A : Union[str, Any]=3 , __A : Union[str, Any]=3_0 , __A : Optional[int]=4_0_0 , __A : Optional[Any]=True , __A : Optional[int]=None , __A : Union[str, Any]=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : Any=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : Optional[Any]=1 / 2_5_5 , __A : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : Optional[Any] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : List[Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = min_resolution
snake_case__ : Optional[Any] = max_resolution
snake_case__ : str = do_resize
snake_case__ : List[str] = size
snake_case__ : List[Any] = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : List[Any] = image_std
snake_case__ : int = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : str = do_pad
def _lowercase ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : Dict , __A : Union[str, Any]=False ):
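        # Expected post-resize (height, width): scale the short side to
        # size["shortest_edge"] keeping aspect ratio; for batched inputs return
        # the per-dimension maxima (batches are padded to the largest image).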
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Any = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : List[str] = int(self.size["shortest_edge"] * h / w )
snake_case__ : Tuple = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Optional[Any] = self.size["shortest_edge"]
snake_case__ : List[Any] = self.size["shortest_edge"]
else:
snake_case__ : Union[str, Any] = []
for image in image_inputs:
snake_case__, snake_case__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Any = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : Optional[int] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : Optional[int] ):
snake_case__ : str = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Tuple ):
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Any ):
pass
def _lowercase ( self : Optional[int] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : Union[str, Any] = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Any ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Union[str, Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image and target
snake_case__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : List[str] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Optional[Any] = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Union[str, Any] ):
# prepare image, target and masks_path
snake_case__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Optional[int] = json.loads(f.read() )
snake_case__ : Any = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Dict = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 286 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = PhobertTokenizer
a_ = False
def _lowercase ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
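        # "@@" marks a subword that continues into the next token; tokens
        # without it end a word.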
snake_case__ : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"]
snake_case__ : int = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Dict = ["#version: 0.2", "l à</w>"]
snake_case__ : Optional[Any] = {"unk_token": "<unk>"}
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def _lowercase ( self : List[str] , **__A : Any ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__A )
def _lowercase ( self : Tuple , __A : List[Any] ):
snake_case__ : str = "Tôi là VinAI Research"
snake_case__ : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def _lowercase ( self : Optional[int] ):
snake_case__ : int = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = "Tôi là VinAI Research"
snake_case__ : List[Any] = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
snake_case__ : int = tokenizer.tokenize(__A )
print(__A )
self.assertListEqual(__A , __A )
snake_case__ : Any = tokens + [tokenizer.unk_token]
snake_case__ : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
| 286 | 1 |
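A toy illustration of the single BPE merge encoded in the merges file above ("l à</w>" means: fuse adjacent symbols "l" and "à</w>"); all names here are hypothetical:

word = ["l", "à</w>"]          # symbol sequence for the word "là"
merges = {("l", "à</w>"): 0}   # rank 0: the only learned merge
if tuple(word[:2]) in merges:
    word = ["".join(word[:2])] + word[2:]
print(word)  # ['là</w>'] -- the pair collapses into one vocabulary token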
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 |
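A minimal sketch (random tensors, shapes matching the tests above) of how MaskFormer's two output heads are usually combined into a per-pixel semantic map: each query contributes its mask weighted by its class probabilities, and the null class (last index) is dropped:

import torch

batch, queries, labels, h, w = 1, 100, 150, 200, 272
class_queries_logits = torch.randn(batch, queries, labels + 1)
masks_queries_logits = torch.randn(batch, queries, h, w)

class_probs = class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the null class
mask_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum("bqc,bqhw->bchw", class_probs, mask_probs)
semantic_map = segmentation.argmax(dim=1)  # (batch, h, w) tensor of label ids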
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
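# Build a reversible byte -> unicode character table: printable latin-1 bytes map to
# themselves, and every remaining byte is shifted to a codepoint >= 256 so that no
# byte ends up invisible or mapped to a control character.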
SCREAMING_SNAKE_CASE_: Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Tuple = bs[:]
SCREAMING_SNAKE_CASE_: str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Optional[int] = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = set()
SCREAMING_SNAKE_CASE_: Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: Tuple = char
return pairs
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
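# Greedily apply the lowest-ranked (earliest-learned) merge; stop once no adjacent pair is known.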
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
| 13 | 0 |
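A short usage sketch for the tokenizer defined above (the checkpoint name is one of the entries in the map above; requires network access on first use):

from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok("Hello world")["input_ids"]
print(ids)              # [0, ..., 2] -- the ids are wrapped in <s> ... </s>
print(tok.decode(ids))  # '<s>Hello world</s>': byte-level BPE round-trips exactly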
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _snake_case :
def A__ ( self : List[str], __lowercase : Optional[int] ):
raise NotImplementedError()
def A__ ( self : List[Any] ):
raise NotImplementedError()
class _snake_case ( lowercase__):
def __init__( self : List[str], __lowercase : "AutoTokenizer", __lowercase : bool = False, **__lowercase : List[Any] ):
lowercase__ = tokenizer
lowercase__ = skip_prompt
lowercase__ = decode_kwargs
# variables used in the streaming process
lowercase__ = []
lowercase__ = 0
lowercase__ = True
def A__ ( self : Union[str, Any], __lowercase : Any ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
lowercase__ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase__ = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
# If the last token is a CJK character, we print the characters.
elif len(__lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase__ = text[self.print_len :]
self.print_len += len(__lowercase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase__ = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(__lowercase )
self.on_finalized_text(__lowercase )
def A__ ( self : Optional[Any] ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
else:
lowercase__ = ""
lowercase__ = True
self.on_finalized_text(__lowercase, stream_end=__lowercase )
def A__ ( self : Optional[Any], __lowercase : str, __lowercase : bool = False ):
print(__lowercase, flush=__lowercase, end="" if not stream_end else None )
def A__ ( self : Optional[int], __lowercase : Union[str, Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as are Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
class _snake_case ( lowercase__):
def __init__( self : Dict, __lowercase : "AutoTokenizer", __lowercase : bool = False, __lowercase : Optional[float] = None, **__lowercase : str ):
super().__init__(__lowercase, __lowercase, **__lowercase )
lowercase__ = Queue()
lowercase__ = None
lowercase__ = timeout
def A__ ( self : List[str], __lowercase : str, __lowercase : bool = False ):
self.text_queue.put(__lowercase, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : str ):
return self
def A__ ( self : Dict ):
lowercase__ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 368 |
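A usage sketch for the queue-backed streamer above, mirroring the public transformers pattern (the model name is illustrative; any causal LM works):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["Streaming works by"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
# generate() runs in a background thread and pushes decoded chunks onto the queue;
# the main thread blocks on queue.get() through the iterator protocol shown above.
Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
for chunk in streamer:
    print(chunk, end="", flush=True)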
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = r"\w+[.]\d+"
lowercase__ = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase__ = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase__ = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ = rename_key(SCREAMING_SNAKE_CASE_ )
lowercase__ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase__ , lowercase__ = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase__ = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
| 224 | 0 |
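A shape-only sketch of the two transposes above: PyTorch stores conv kernels as (out, in, kh, kw) and linear weights as (out, in), while Flax expects (kh, kw, in, out) and (in, out):

import numpy as np

conv_pt = np.zeros((8, 3, 5, 5))            # (out_channels, in_channels, kh, kw)
print(conv_pt.transpose(2, 3, 1, 0).shape)  # (5, 5, 3, 8) -- Flax kernel layout
linear_pt = np.zeros((16, 32))              # (out_features, in_features)
print(linear_pt.T.shape)                    # (32, 16) -- Flax kernel layout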
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase : Dict = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class UpperCAmelCase_ ( unittest.TestCase , _A ):
'''simple docstring'''
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = load_tool("""text-question-answering""" )
self.tool.setup()
__magic_name__ = load_tool("""text-question-answering""" , remote=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.tool(UpperCamelCase__ , """What did Hugging Face do in April 2021?""" )
self.assertEqual(UpperCamelCase__ , """launched the BigScience Research Workshop""" )
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
__magic_name__ = self.remote_tool(UpperCamelCase__ , """What did Hugging Face do in April 2021?""" )
self.assertEqual(UpperCamelCase__ , """launched the BigScience Research Workshop""" )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.tool(text=UpperCamelCase__ , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(UpperCamelCase__ , """launched the BigScience Research Workshop""" )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.remote_tool(text=UpperCamelCase__ , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(UpperCamelCase__ , """launched the BigScience Research Workshop""" )
| 88 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple:
a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) )
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Tuple = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : List[Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Optional[Any] = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : int = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Union[str, Any] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : List[str] = ort.SessionOptions()
a_.enable_mem_pattern = False
return a_
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : int = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = 'A fantasy landscape, trending on artstation'
a_ : str = torch.manual_seed(0 )
a_ : List[str] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Dict = output.images
a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : List[str] = init_image.resize((1_2_8, 1_2_8) )
a_ : Dict = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Any = 'A fantasy landscape, trending on artstation'
a_ : Tuple = torch.manual_seed(0 )
a_ : Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : str = output.images
a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Tuple = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 32 | 0 |
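A minimal sketch of how the provider tuple and session options above feed onnxruntime (the model path is hypothetical):

import onnxruntime as ort

provider = ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"})
options = ort.SessionOptions()
options.enable_mem_pattern = False
# sess = ort.InferenceSession("model.onnx", sess_options=options, providers=[provider])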
import math
def is_prime( lowercase_ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase( lowercase_ = 0.1 ) -> int:
'''simple docstring'''
snake_case_ = 3
snake_case_ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34 |
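A worked example of the diagonal enumeration above (this is the Ulam-spiral prime-ratio problem): for j = 3 the next ring has side j + 2 = 5, its bottom-right corner (j + 2)**2 = 25 is a perfect square and never prime, and the three remaining corners are spaced j + 1 apart:

j = 3
print(list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1)))  # [13, 17, 21]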
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 34 | 1 |
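A stripped-down sketch of the lazy-import pattern the module above relies on (simplified; the real transformers _LazyModule also handles TYPE_CHECKING, __dir__, and pickling):

import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an exported attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object back to the submodule that defines it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache the attribute so __getattr__ is not hit again
        return value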